code stringlengths 1 25.8M | language stringclasses 18 values | source stringclasses 4 values | repo stringclasses 78 values | path stringlengths 0 268 |
|---|---|---|---|---|
from __future__ import unicode_literals
from django import forms
from mezzanine.blog.models import BlogPost
from mezzanine.core.models import CONTENT_STATUS_DRAFT
# These fields must be present in the form as hidden inputs carrying the
# model's default values: the form posts to the blog post admin, which
# includes these fields and would otherwise submit empty values in place
# of the model defaults.
hidden_field_defaults = ("status", "gen_description", "allow_comments")
class BlogPostForm(forms.ModelForm):
    """
    Model form for ``BlogPost`` that provides the quick blog panel in the
    admin dashboard.
    """

    class Meta:
        model = BlogPost
        fields = ("title", "content") + hidden_field_defaults

    def __init__(self, *args, **kwargs):
        # Seed the hidden fields with the model's defaults so the admin
        # view they post to does not receive empty values for them.
        initial = {}
        for field in hidden_field_defaults:
            initial[field] = BlogPost._meta.get_field(field).default
        initial["status"] = CONTENT_STATUS_DRAFT
        # Merge any caller-supplied initial data instead of discarding it,
        # and forward all other form arguments (data, instance, prefix, ...)
        # so the form can actually be bound -- the previous signature took
        # no arguments at all, breaking ``BlogPostForm(request.POST)``.
        initial.update(kwargs.pop("initial", {}))
        super(BlogPostForm, self).__init__(initial=initial, *args, **kwargs)
        for field in hidden_field_defaults:
            self.fields[field].widget = forms.HiddenInput()
<!-- #docplaster -->
<!-- The toggle button flips the open/closed state read by isOpen(), which
     drives the @openClose animation trigger binding below. -->
<nav>
  <button type="button" (click)="toggle()">Toggle Open/Close</button>
</nav>
<div [@openClose]="isOpen() ? 'open' : 'closed'" class="open-close-container">
  <p>The box is now {{ isOpen() ? 'Open' : 'Closed' }}!</p>
</div>
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the "Elastic License
* 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side
* Public License v 1"; you may not use this file except in compliance with, at
* your election, the "Elastic License 2.0", the "GNU Affero General Public
* License v3.0 only", or the "Server Side Public License, v 1".
*/
package org.elasticsearch.benchmark.index.codec.tsdb.internal;
import java.util.Arrays;
import java.util.Random;
import java.util.function.Supplier;
/**
* Generates values from a small set of distinct values with skewed frequency.
*
* <p>Simulates metrics like HTTP status codes or enum fields where only
* a handful of distinct values appear with varying frequencies (Zipf distribution).
*/
public class LowCardinalitySupplier implements Supplier<long[]> {
private final Random random;
private final int size;
private final int distinctValues;
private final double skew;
private final long maxValue;
private LowCardinalitySupplier(Builder builder) {
this.random = new Random(builder.seed);
this.size = builder.size;
this.distinctValues = builder.distinctValues;
this.skew = builder.skew;
this.maxValue = builder.maxValue;
}
/**
* Returns a new builder for this supplier.
*
* @param seed random seed for reproducibility
* @param size number of values to generate
* @return a new builder instance
*/
public static Builder builder(int seed, int size) {
return new Builder(seed, size);
}
@Override
public long[] get() {
long[] values = generateDistinctValues();
double[] cumulativeWeights = computeZipfWeights();
return sampleValues(values, cumulativeWeights);
}
private long[] generateDistinctValues() {
long[] values = new long[distinctValues];
long step = maxValue / distinctValues;
for (int i = 0; i < distinctValues; i++) {
long base = i * step;
long jitter = step > 1 ? random.nextLong(step) : 0;
values[i] = base + jitter;
}
return values;
}
private double[] computeZipfWeights() {
double[] cumulativeWeights = new double[distinctValues];
double totalWeight = 0;
for (int i = 0; i < distinctValues; i++) {
totalWeight += 1.0 / Math.pow(i + 1, skew);
cumulativeWeights[i] = totalWeight;
}
return cumulativeWeights;
}
private long[] sampleValues(long[] values, double[] cumulativeWeights) {
final long[] data = new long[size];
double totalWeight = cumulativeWeights[cumulativeWeights.length - 1];
for (int i = 0; i < size; i++) {
double r = random.nextDouble() * totalWeight;
int idx = Arrays.binarySearch(cumulativeWeights, r);
if (idx < 0) {
idx = -idx - 1;
}
data[i] = values[idx];
}
return data;
}
/**
* Returns the number of bits required to represent the maximum possible value.
*
* @return nominal bits per value based on configured maxValue
*/
public int getNominalBitsPerValue() {
return 64 - Long.numberOfLeadingZeros(maxValue - 1);
}
/** Builder for {@link LowCardinalitySupplier}. */
public static class Builder {
private final int seed;
private final int size;
private int distinctValues = 10;
private double skew = 2.0;
private long maxValue = 10_000L;
private Builder(int seed, int size) {
assert size >= 1 : "size must be positive";
this.seed = seed;
this.size = size;
}
/**
* Sets the number of distinct values.
*
* @param distinctValues number of distinct values (must be positive)
* @return this builder
*/
public Builder withDistinctValues(int distinctValues) {
assert distinctValues >= 1 : "distinctValues must be positive";
this.distinctValues = distinctValues;
return this;
}
/**
* Sets the Zipf skew parameter.
*
* @param skew the skew parameter (must be non-negative)
* @return this builder
*/
public Builder withSkew(double skew) {
assert skew >= 0.0 : "skew must be non-negative";
this.skew = skew;
return this;
}
/**
* Sets the maximum value range.
*
* @param maxValue the maximum value
* @return this builder
*/
public Builder withMaxValue(long maxValue) {
this.maxValue = maxValue;
return this;
}
/**
* Builds the supplier.
*
* @return the configured supplier
*/
public LowCardinalitySupplier build() {
assert maxValue >= distinctValues : "maxValue must be >= distinctValues";
return new LowCardinalitySupplier(this);
}
}
} | java | github | https://github.com/elastic/elasticsearch | benchmarks/src/main/java/org/elasticsearch/benchmark/index/codec/tsdb/internal/LowCardinalitySupplier.java |
# Copyright (c) 2014 VMware, Inc.
#
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import collections
import mock
from oslo_vmware import vim_util
from nova import exception
from nova import test
from nova.tests.unit.virt.vmwareapi import fake
from nova.tests.unit.virt.vmwareapi import stubs
from nova.virt.vmwareapi import driver
from nova.virt.vmwareapi import network_util
from nova.virt.vmwareapi import vm_util
# Lightweight stand-ins for the suds result objects returned by the
# vSphere API, used below to assemble fake retrieval results.
ResultSet = collections.namedtuple('ResultSet', ['objects'])
ObjectContent = collections.namedtuple('ObjectContent', ['obj', 'propSet'])
DynamicProperty = collections.namedtuple('DynamicProperty', ['name', 'val'])
class GetNetworkWithTheNameTestCase(test.NoDBTestCase):
    """Tests for ``network_util.get_network_with_the_name``.

    Each test stubs the session's ``_call_method`` with a closure that
    fakes the vSphere property-collector calls the lookup performs.
    """

    def setUp(self):
        super(GetNetworkWithTheNameTestCase, self).setUp()
        fake.reset()
        self.stubs.Set(driver.VMwareAPISession, "vim", stubs.fake_vim_prop)
        self.stubs.Set(driver.VMwareAPISession, "_is_vim_object",
                       stubs.fake_is_vim_object)
        self._session = driver.VMwareAPISession()

    def _build_cluster_networks(self, networks):
        """Returns a set of results for a cluster network lookup.

        This is an example:
        (ObjectContent){
            obj =
               (obj){
                  value = "domain-c7"
                  _type = "ClusterComputeResource"
               }
            propSet[] =
               (DynamicProperty){
                  name = "network"
                  val =
                     (ArrayOfManagedObjectReference){
                        ManagedObjectReference[] =
                           (ManagedObjectReference){
                              value = "network-54"
                              _type = "Network"
                           },
                           (ManagedObjectReference){
                              value = "dvportgroup-14"
                              _type = "DistributedVirtualPortgroup"
                           },
                     }
               },
        }]
        """
        objects = []
        # Single cluster object whose 'network' property lists the morefs.
        obj = ObjectContent(obj=vim_util.get_moref("domain-c7",
                                                   "ClusterComputeResource"),
                            propSet=[])
        value = fake.DataObject()
        value.ManagedObjectReference = []
        for network in networks:
            value.ManagedObjectReference.append(network)
        obj.propSet.append(
            DynamicProperty(name='network',
                            val=value))
        objects.append(obj)
        return ResultSet(objects=objects)

    def test_get_network_no_match(self):
        # No candidate network name matches, so the lookup must page
        # through results (continue_retrieval) and finally return None.
        net_morefs = [vim_util.get_moref("dvportgroup-135",
                                         "DistributedVirtualPortgroup"),
                      vim_util.get_moref("dvportgroup-136",
                                         "DistributedVirtualPortgroup")]
        networks = self._build_cluster_networks(net_morefs)
        self._continue_retrieval_called = False

        def mock_call_method(module, method, *args, **kwargs):
            if method == 'get_object_properties':
                return networks
            if method == 'get_object_property':
                result = fake.DataObject()
                result.name = 'no-match'
                return result
            if method == 'continue_retrieval':
                self._continue_retrieval_called = True

        with mock.patch.object(self._session, '_call_method',
                               mock_call_method):
            res = network_util.get_network_with_the_name(self._session,
                                                         'fake_net',
                                                         'fake_cluster')
            self.assertTrue(self._continue_retrieval_called)
            self.assertIsNone(res)

    def _get_network_dvs_match(self, name, token=False):
        """Drive a lookup that matches a distributed portgroup.

        When ``token`` is set, the first page deliberately misses so the
        match only happens after ``continue_retrieval`` returns a second
        page of results.
        """
        net_morefs = [vim_util.get_moref("dvportgroup-135",
                                         "DistributedVirtualPortgroup")]
        networks = self._build_cluster_networks(net_morefs)

        def mock_call_method(module, method, *args, **kwargs):
            if method == 'get_object_properties':
                return networks
            if method == 'get_object_property':
                result = fake.DataObject()
                # Miss on the first page when paging is being exercised.
                if not token or self._continue_retrieval_called:
                    result.name = name
                else:
                    result.name = 'fake_name'
                result.key = 'fake_key'
                result.distributedVirtualSwitch = 'fake_dvs'
                return result
            if method == 'continue_retrieval':
                if token:
                    self._continue_retrieval_called = True
                    return networks
            if method == 'cancel_retrieval':
                self._cancel_retrieval_called = True

        with mock.patch.object(self._session, '_call_method',
                               mock_call_method):
            res = network_util.get_network_with_the_name(self._session,
                                                         'fake_net',
                                                         'fake_cluster')
            self.assertIsNotNone(res)

    def test_get_network_dvs_exact_match(self):
        self._cancel_retrieval_called = False
        self._get_network_dvs_match('fake_net')
        self.assertTrue(self._cancel_retrieval_called)

    def test_get_network_dvs_match(self):
        # NSX-style portgroup names carry a "dvs_..." prefix before the
        # requested network name.
        self._cancel_retrieval_called = False
        self._get_network_dvs_match('dvs_7-virtualwire-7-fake_net')
        self.assertTrue(self._cancel_retrieval_called)

    def test_get_network_dvs_match_with_token(self):
        self._continue_retrieval_called = False
        self._cancel_retrieval_called = False
        self._get_network_dvs_match('dvs_7-virtualwire-7-fake_net',
                                    token=True)
        self.assertTrue(self._continue_retrieval_called)
        self.assertTrue(self._cancel_retrieval_called)

    def test_get_network_network_match(self):
        # Plain (non-DVS) Network objects match on their name property.
        net_morefs = [vim_util.get_moref("network-54", "Network")]
        networks = self._build_cluster_networks(net_morefs)

        def mock_call_method(module, method, *args, **kwargs):
            if method == 'get_object_properties':
                return networks
            if method == 'get_object_property':
                return 'fake_net'

        with mock.patch.object(self._session, '_call_method',
                               mock_call_method):
            res = network_util.get_network_with_the_name(self._session,
                                                         'fake_net',
                                                         'fake_cluster')
            self.assertIsNotNone(res)
class GetVlanIdAndVswitchForPortgroupTestCase(test.NoDBTestCase):
    """Tests for ``network_util.get_vlanid_and_vswitch_for_portgroup``."""

    @mock.patch.object(vm_util, 'get_host_ref')
    def test_no_port_groups(self, mock_get_host_ref):
        # With no port groups on the host, the lookup must raise.
        fake_session = mock.Mock()
        fake_session._call_method.return_value = None
        self.assertRaises(
            exception.NovaException,
            network_util.get_vlanid_and_vswitch_for_portgroup,
            fake_session,
            'port_group_name',
            'fake_cluster'
        )

    @mock.patch.object(vm_util, 'get_host_ref')
    def test_valid_port_group(self, mock_get_host_ref):
        # A matching port group yields its VLAN id and vswitch name.
        fake_session = mock.Mock()
        fake_session._call_method.return_value = self._fake_port_groups()
        vlanid, vswitch = network_util.get_vlanid_and_vswitch_for_portgroup(
            fake_session,
            'port_group_name',
            'fake_cluster'
        )
        self.assertEqual(vlanid, 100)
        self.assertEqual(vswitch, 'vswitch_name')

    @mock.patch.object(vm_util, 'get_host_ref')
    def test_unknown_port_group(self, mock_get_host_ref):
        # An unknown port group name yields (None, None).
        fake_session = mock.Mock()
        fake_session._call_method.return_value = self._fake_port_groups()
        vlanid, vswitch = network_util.get_vlanid_and_vswitch_for_portgroup(
            fake_session,
            'unknown_port_group',
            'fake_cluster'
        )
        self.assertIsNone(vlanid)
        self.assertIsNone(vswitch)

    def _fake_port_groups(self):
        """Build a fake HostPortGroup response with a single port group."""
        spec = fake.DataObject()
        spec.name = 'port_group_name'
        spec.vlanId = 100

        group = fake.DataObject()
        group.spec = spec
        group.vswitch = 'vswitch_name'

        response = fake.DataObject()
        response.HostPortGroup = [group]
        return response
#!/usr/bin/env python
__author__ = 'Alan Viars @aviars'
import jwt
import sys
from cryptography.x509 import load_pem_x509_certificate, OID_COMMON_NAME
from cryptography.hazmat.backends import default_backend
from collections import OrderedDict
def verify_poet(jws, public_key_string):
    """Verify a POET JWS against a PEM-encoded public certificate.

    Checks the RS256 signature and that the JWT ``iss`` claim matches the
    certificate's CN (Common Name).

    Returns the decoded payload dict on success, or a JSON string
    describing the error (expired token, issuer mismatch, or missing
    issuer claim).
    """
    jws = jws.strip()
    # str.encode() returns bytes on Python 3 and a byte string on
    # Python 2, so no version-specific list()/bytes() juggling is needed.
    cert_bytes = public_key_string.encode()
    certificate = load_pem_x509_certificate(cert_bytes, default_backend())
    public_key = certificate.public_key()
    cn = certificate.subject.get_attributes_for_oid(OID_COMMON_NAME)[0].value
    try:
        payload = jwt.decode(jws, public_key, algorithms=['RS256'])
    except jwt.exceptions.ExpiredSignatureError:
        # Return immediately: the error sentinel is a plain string, so
        # falling through to payload.get() below would raise
        # AttributeError instead of reporting the expiry.
        return """{"error":"EXPIRED"}"""
    if payload.get('iss', ''):
        if payload['iss'] != cn:
            payload = """{"error":"The CN (Common Name) in the public certificate did not match the iss (Issuer) in the JWT payload."}"""
    else:
        payload = """{"error":"iss (Issuer) was not found in the payload."}"""
    return payload
#command line app.
if __name__ == "__main__":
    if len(sys.argv) != 3:
        print("Usage:")
        print("verify_poet.py [ENCODED_JWT_FILE] [PUBLIC_CERT_FILE]")
        print("Example: verify_poet.py my.jwt my_public_cert.pem")
        sys.exit(1)

    jwt_path, public_key_path = sys.argv[1], sys.argv[2]

    # Context managers guarantee both files are closed on exit.
    with open(jwt_path) as jwt_fh, open(public_key_path) as public_key_fh:
        result = verify_poet(jwt_fh.read(), public_key_fh.read())
        print(result)
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# idler.py
#
# Copyright 2011, 2012 Patrick Ulbrich <zulu99@gmx.net>
# Copyright 2011 Leighton Earl <leighton.earl@gmx.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA.
#
import threading
import time
import sys
from daemon.imaplib2 import AUTH
class Idler(object):
    """Watches one account's IMAP folder via IMAP IDLE.

    Runs a background thread that blocks in IDLE and invokes
    ``sync_callback(account)`` whenever mail activity is signalled,
    transparently retrying the connection when the provider drops it.
    """

    def __init__(self, account, sync_callback):
        self.RECONNECT_RETRY_INTERVAL = 5 # minutes

        self._thread = threading.Thread(target=self._idle)
        # Single event used both to wake the idle loop on mail activity
        # and to signal shutdown from dispose().
        self._event = threading.Event()
        self._sync_callback = sync_callback
        self._account = account
        self._conn = account.get_connection(use_existing = True) # connection has been opened in mailnag.py already (immediate check)
        self._disposed = False

        if self._conn == None:
            raise Exception("Failed to establish a connection for account '%s'" % account.name)

        # Need to get out of AUTH mode of fresh connections.
        if self._conn.state == AUTH:
            self._select(self._conn, account.folder)

    def run(self):
        """Start the idle thread. May only be called once per instance."""
        if self._disposed:
            raise Exception("Idler has been disposed")
        self._thread.start()

    def dispose(self):
        """Stop the idle thread and tear down the IMAP connection."""
        if self._thread.is_alive():
            self._event.set()
            self._thread.join()
        try:
            if self._conn != None:
                # (calls idle_callback)
                self._conn.close()
                # shutdown existing callback thread
                self._conn.logout()
        except:
            pass
        self._disposed = True
        print "Idler closed"

    # idle thread
    def _idle(self):
        # NOTE(review): this drops and re-establishes the connection
        # opened in __init__ before entering the loop -- confirm this is
        # intended rather than only reconnecting on failure.
        self._reconnect()

        while True:
            # if the event is set here,
            # disposed() must have been called
            # so stop the idle thread.
            if self._event.isSet():
                return

            self._needsync = False
            self._conn_closed = False

            # register idle callback that is called whenever an idle event arrives (new mail / mail deleted).
            # the callback is called after 10 minutes at the latest. gmail sends keepalive events every 5 minutes.
            self._conn.idle(callback = self._idle_callback, timeout = 60 * 10)

            # waits for the event to be set
            # (in idle callback or in dispose())
            self._event.wait()

            # if the event is set due to idle sync
            if self._needsync:
                self._event.clear()
                if self._conn_closed:
                    self._reconnect()
                if self._conn != None:
                    self._sync_callback(self._account)

    # idle callback (runs on a further thread)
    def _idle_callback(self, args):
        # check if the connection has been reset by provider
        self._conn_closed = (args[2] != None) and (args[2][0] is self._conn.abort)
        # flag that a mail sync is needed
        self._needsync = True
        # trigger waiting _idle thread
        self._event.set()

    def _reconnect(self):
        """Tear down the current connection and retry until it is back up
        or dispose() is requested."""
        # connection has been reset by provider -> try to reconnect
        print "Idler thread for account '%s' has been disconnected" % self._account.name
        # conn has already been closed, don't try to close it again
        # self._conn.close() # (calls idle_callback)
        # shutdown existing callback thread
        self._conn.logout()
        self._conn = None

        while (self._conn == None) and (not self._event.isSet()):
            sys.stdout.write("Trying to reconnect Idler thread for account '%s'..." % self._account.name)
            self._conn = self._account.get_connection(use_existing = False)
            if self._conn == None:
                sys.stdout.write("FAILED\n")
                print "Trying again in %s minutes" % self.RECONNECT_RETRY_INTERVAL
                self._wait(60 * self.RECONNECT_RETRY_INTERVAL) # don't hammer the server
            else:
                sys.stdout.write("OK\n")

        if self._conn != None:
            self._select(self._conn, self._account.folder)

    def _select(self, conn, folder):
        """Select the account's configured folder, defaulting to INBOX."""
        folder = folder.strip()
        if len(folder) > 0:
            conn.select(folder)
        else:
            conn.select("INBOX")

    def _wait(self, secs):
        """Sleep up to ``secs`` seconds, waking early if dispose() is
        requested (checked once per second)."""
        start_time = time.time()
        while (((time.time() - start_time) < secs) and (not self._event.isSet())):
            time.sleep(1)
<!doctype html>
<html lang="en">
<head>
<meta charset="utf-8">
<meta name="viewport" content="width=device-width, initial-scale=1">
<link href="../../../dist/css/bootstrap.min.css" rel="stylesheet">
<title>Modal</title>
<style>
#tall {
height: 1500px;
width: 100px;
}
</style>
</head>
<body>
<nav class="navbar navbar-expand-lg navbar-dark bg-dark">
<div class="container-fluid">
<button class="navbar-toggler" type="button" data-bs-toggle="collapse" data-bs-target="#navbarResponsive" aria-controls="navbarResponsive" aria-expanded="false" aria-label="Toggle navigation">
<span class="navbar-toggler-icon"></span>
</button>
<div class="collapse navbar-collapse" id="navbarResponsive">
<a class="navbar-brand" href="#">This shouldn't jump!</a>
<ul class="navbar-nav">
<li class="nav-item">
<a class="nav-link active" href="#" aria-current="page">Home</a>
</li>
<li class="nav-item">
<a class="nav-link" href="#">Link</a>
</li>
<li class="nav-item">
<a class="nav-link" href="#">Link</a>
</li>
</ul>
</div>
</div>
</nav>
<div class="container mt-3">
<h1>Modal <small>Bootstrap Visual Test</small></h1>
<div class="modal fade" id="myModal" tabindex="-1" aria-labelledby="myModalLabel" aria-hidden="true">
<div class="modal-dialog">
<div class="modal-content">
<div class="modal-header">
<h1 class="modal-title fs-4" id="myModalLabel">Modal title</h1>
<button type="button" class="btn-close" data-bs-dismiss="modal" aria-label="Close"></button>
</div>
<div class="modal-body">
<h4>Text in a modal</h4>
<p>Duis mollis, est non commodo luctus, nisi erat porttitor ligula.</p>
<h4>Popover in a modal</h4>
<p>This <button type="button" class="btn btn-primary" data-bs-toggle="popover" data-bs-placement="left" title="Popover title" data-bs-content="And here's some amazing content. It's very engaging. Right?">button</button> should trigger a popover on click.</p>
<h4>Tooltips in a modal</h4>
<p><a href="#" data-bs-toggle="tooltip" data-bs-placement="top" title="Tooltip on top">This link</a> and <a href="#" data-bs-toggle="tooltip" data-bs-placement="bottom" title="Tooltip on bottom">that link</a> should have tooltips on hover.</p>
<div id="accordion" role="tablist">
<div class="card" role="presentation">
<div class="card-header" role="tab" id="headingOne">
<h5 class="mb-0">
<a data-bs-toggle="collapse" href="#collapseOne" aria-expanded="true" aria-controls="collapseOne">
Collapsible Group Item #1
</a>
</h5>
</div>
<div id="collapseOne" class="collapse show" data-bs-parent="#accordion" role="tabpanel" aria-labelledby="headingOne">
<div class="card-body">
Anim pariatur cliche reprehenderit, enim eiusmod high life accusamus terry richardson ad squid. 3 wolf moon officia aute, non cupidatat skateboard dolor brunch. Food truck quinoa nesciunt laborum eiusmod. Brunch 3 wolf moon tempor, sunt aliqua put a bird on it squid single-origin coffee nulla assumenda shoreditch et. Nihil anim keffiyeh helvetica, craft beer labore wes anderson cred nesciunt sapiente ea proident. Ad vegan excepteur butcher vice lomo. Leggings occaecat craft beer farm-to-table, raw denim aesthetic synth nesciunt you probably haven't heard of them accusamus labore sustainable VHS.
</div>
</div>
</div>
<div class="card" role="presentation">
<div class="card-header" role="tab" id="headingTwo">
<h5 class="mb-0">
<a class="collapsed" data-bs-toggle="collapse" href="#collapseTwo" aria-expanded="false" aria-controls="collapseTwo">
Collapsible Group Item #2
</a>
</h5>
</div>
<div id="collapseTwo" class="collapse" data-bs-parent="#accordion" role="tabpanel" aria-labelledby="headingTwo">
<div class="card-body">
Anim pariatur cliche reprehenderit, enim eiusmod high life accusamus terry richardson ad squid. 3 wolf moon officia aute, non cupidatat skateboard dolor brunch. Food truck quinoa nesciunt laborum eiusmod. Brunch 3 wolf moon tempor, sunt aliqua put a bird on it squid single-origin coffee nulla assumenda shoreditch et. Nihil anim keffiyeh helvetica, craft beer labore wes anderson cred nesciunt sapiente ea proident. Ad vegan excepteur butcher vice lomo. Leggings occaecat craft beer farm-to-table, raw denim aesthetic synth nesciunt you probably haven't heard of them accusamus labore sustainable VHS.
</div>
</div>
</div>
<div class="card" role="presentation">
<div class="card-header" role="tab" id="headingThree">
<h5 class="mb-0">
<a class="collapsed" data-bs-toggle="collapse" href="#collapseThree" aria-expanded="false" aria-controls="collapseThree">
Collapsible Group Item #3
</a>
</h5>
</div>
<div id="collapseThree" class="collapse" data-bs-parent="#accordion" role="tabpanel" aria-labelledby="headingThree">
<div class="card-body">
Anim pariatur cliche reprehenderit, enim eiusmod high life accusamus terry richardson ad squid. 3 wolf moon officia aute, non cupidatat skateboard dolor brunch. Food truck quinoa nesciunt laborum eiusmod. Brunch 3 wolf moon tempor, sunt aliqua put a bird on it squid single-origin coffee nulla assumenda shoreditch et. Nihil anim keffiyeh helvetica, craft beer labore wes anderson cred nesciunt sapiente ea proident. Ad vegan excepteur butcher vice lomo. Leggings occaecat craft beer farm-to-table, raw denim aesthetic synth nesciunt you probably haven't heard of them accusamus labore sustainable VHS.
</div>
</div>
</div>
</div>
<hr>
<h4>Overflowing text to show scroll behavior</h4>
<p>Cras mattis consectetur purus sit amet fermentum. Cras justo odio, dapibus ac facilisis in, egestas eget quam. Morbi leo risus, porta ac consectetur ac, vestibulum at eros.</p>
<p>Praesent commodo cursus magna, vel scelerisque nisl consectetur et. Vivamus sagittis lacus vel augue laoreet rutrum faucibus dolor auctor.</p>
<p>Aenean lacinia bibendum nulla sed consectetur. Praesent commodo cursus magna, vel scelerisque nisl consectetur et. Donec sed odio dui. Donec ullamcorper nulla non metus auctor fringilla.</p>
<p>Cras mattis consectetur purus sit amet fermentum. Cras justo odio, dapibus ac facilisis in, egestas eget quam. Morbi leo risus, porta ac consectetur ac, vestibulum at eros.</p>
<p>Praesent commodo cursus magna, vel scelerisque nisl consectetur et. Vivamus sagittis lacus vel augue laoreet rutrum faucibus dolor auctor.</p>
<p>Aenean lacinia bibendum nulla sed consectetur. Praesent commodo cursus magna, vel scelerisque nisl consectetur et. Donec sed odio dui. Donec ullamcorper nulla non metus auctor fringilla.</p>
<p>Cras mattis consectetur purus sit amet fermentum. Cras justo odio, dapibus ac facilisis in, egestas eget quam. Morbi leo risus, porta ac consectetur ac, vestibulum at eros.</p>
<p>Praesent commodo cursus magna, vel scelerisque nisl consectetur et. Vivamus sagittis lacus vel augue laoreet rutrum faucibus dolor auctor.</p>
<p>Aenean lacinia bibendum nulla sed consectetur. Praesent commodo cursus magna, vel scelerisque nisl consectetur et. Donec sed odio dui. Donec ullamcorper nulla non metus auctor fringilla.</p>
</div>
<div class="modal-footer">
<button type="button" class="btn btn-secondary" data-bs-dismiss="modal">Close</button>
<button type="button" class="btn btn-primary">Save changes</button>
</div>
</div>
</div>
</div>
<div class="modal fade" id="firefoxModal" tabindex="-1" aria-labelledby="firefoxModalLabel" aria-hidden="true">
<div class="modal-dialog">
<div class="modal-content">
<div class="modal-header">
<h1 class="modal-title fs-4" id="firefoxModalLabel">Firefox Bug Test</h1>
<button type="button" class="btn-close" data-bs-dismiss="modal" aria-label="Close"></button>
</div>
<div class="modal-body">
<ol>
<li>Ensure you're using Firefox.</li>
<li>Open a new tab and then switch back to this tab.</li>
<li>Click into this input: <input type="text" id="ff-bug-input"></li>
<li>Switch to the other tab and then back to this tab.</li>
</ol>
<p>Test result: <strong id="ff-bug-test-result"></strong></p>
</div>
<div class="modal-footer">
<button type="button" class="btn btn-secondary" data-bs-dismiss="modal">Close</button>
<button type="button" class="btn btn-primary">Save changes</button>
</div>
</div>
</div>
</div>
<div class="modal fade" id="slowModal" tabindex="-1" aria-labelledby="slowModalLabel" aria-hidden="true" style="transition-duration: 5s;">
<div class="modal-dialog" style="transition-duration: inherit;">
<div class="modal-content">
<div class="modal-header">
<h1 class="modal-title fs-4" id="slowModalLabel">Lorem slowly</h1>
<button type="button" class="btn-close" data-bs-dismiss="modal" aria-label="Close"></button>
</div>
<div class="modal-body">
<p>Cum sociis natoque penatibus et magnis dis parturient montes, nascetur ridiculus mus. Duis mollis, est non commodo luctus, nisi erat porttitor ligula, eget lacinia odio sem nec elit. Donec sed odio dui. Nullam quis risus eget urna mollis ornare vel eu leo. Nulla vitae elit libero, a pharetra augue.</p>
</div>
<div class="modal-footer">
<button type="button" class="btn btn-secondary" data-bs-dismiss="modal">Close</button>
<button type="button" class="btn btn-primary">Save changes</button>
</div>
</div>
</div>
</div>
<button type="button" class="btn btn-primary btn-lg" data-bs-toggle="modal" data-bs-target="#myModal">
Launch demo modal
</button>
<button type="button" class="btn btn-primary btn-lg" id="tall-toggle">
  Toggle tall &lt;body&gt; content
</button>
<br><br>
<button type="button" class="btn btn-secondary btn-lg" data-bs-toggle="modal" data-bs-target="#firefoxModal">
Launch Firefox bug test modal
</button>
(<a href="https://github.com/twbs/bootstrap/issues/18365">See Issue #18365</a>)
<br><br>
<button type="button" class="btn btn-secondary btn-lg" data-bs-toggle="modal" data-bs-target="#slowModal">
Launch modal with slow transition
</button>
<br><br>
<div class="text-bg-dark p-2" id="tall" style="display: none;">
Tall body content to force the page to have a scrollbar.
</div>
<button type="button" class="btn btn-secondary btn-lg" data-bs-toggle="modal" data-bs-target="<div class="modal fade the-bad" tabindex="-1"><div class="modal-dialog"><div class="modal-content"><div class="modal-header"><button type="button" class="btn-close" data-bs-dismiss="modal" aria-label="Close"><span aria-hidden="true">&times;</span></button><h1 class="modal-title fs-4">The Bad Modal</h1></div><div class="modal-body">This modal's HTTML source code is declared inline, inside the data-bs-target attribute of it's show-button</div></div></div></div>">
Modal with an XSS inside the data-bs-target
</button>
<br><br>
<button type="button" class="btn btn-secondary btn-lg" id="btnPreventModal">
Launch prevented modal on hide (to see the result open your console)
</button>
</div>
<script src="../../../dist/js/bootstrap.bundle.js"></script>
<script>
  /* global bootstrap: false */

  // --- Firefox focus-bug test (issue #18365) ------------------------------
  const ffBugTestResult = document.getElementById('ff-bug-test-result')
  // NOTE(review): declared const and never reassigned, so the guard in
  // reportFirefoxTestResult below can never trip -- confirm whether this
  // was meant to be flipped to true after the first report.
  const firefoxTestDone = false
  function reportFirefoxTestResult(result) {
    if (!firefoxTestDone) {
      ffBugTestResult.classList.add(result ? 'text-success' : 'text-danger')
      ffBugTestResult.textContent = result ? 'PASS' : 'FAIL'
    }
  }

  // Activate all declarative popovers and tooltips on the page.
  document.querySelectorAll('[data-bs-toggle="popover"]').forEach(popoverEl => new bootstrap.Popover(popoverEl))
  document.querySelectorAll('[data-bs-toggle="tooltip"]').forEach(tooltipEl => new bootstrap.Tooltip(tooltipEl))

  // Toggle the tall filler block to force/remove a page scrollbar.
  const tall = document.getElementById('tall')
  document.getElementById('tall-toggle').addEventListener('click', () => {
    tall.style.display = tall.style.display === 'none' ? 'block' : 'none'
  })

  const ffBugInput = document.getElementById('ff-bug-input')
  const firefoxModal = document.getElementById('firefoxModal')
  // NOTE(review): Function#bind takes thisArg as its first argument, so
  // bind(false)/bind(true) set `this`, not the `result` parameter --
  // `result` will be the focus Event in both listeners. Preselecting the
  // argument would need bind(null, false) / bind(null, true); confirm
  // intent against upstream before changing.
  function handlerClickFfBugInput() {
    firefoxModal.addEventListener('focus', reportFirefoxTestResult.bind(false))
    ffBugInput.addEventListener('focus', reportFirefoxTestResult.bind(true))
    ffBugInput.removeEventListener('focus', handlerClickFfBugInput)
  }
  ffBugInput.addEventListener('focus', handlerClickFfBugInput)

  const modalFf = new bootstrap.Modal(firefoxModal)
  // Verify that preventing hide.bs.modal leaves the plugin in a sane
  // state (no lingering _isTransitioning flag).
  document.getElementById('btnPreventModal').addEventListener('click', () => {
    const shownFirefoxModal = () => {
      modalFf.hide()
      firefoxModal.removeEventListener('shown.bs.modal', hideFirefoxModal)
    }
    const hideFirefoxModal = event => {
      event.preventDefault()
      firefoxModal.removeEventListener('hide.bs.modal', hideFirefoxModal)
      if (modalFf._isTransitioning) {
        console.error('Modal plugin should not set _isTransitioning when hide event is prevented')
      } else {
        console.log('Test passed')
        modalFf.hide() // work as expected
      }
    }
    firefoxModal.addEventListener('shown.bs.modal', shownFirefoxModal)
    firefoxModal.addEventListener('hide.bs.modal', hideFirefoxModal)
    modalFf.show()
  })

  // Test transition duration
  let t0
  let t1
  const slowModal = document.getElementById('slowModal')
  slowModal.addEventListener('shown.bs.modal', () => {
    t1 = performance.now()
    console.log(`transition-duration took ${t1 - t0}ms.`)
  })
  slowModal.addEventListener('show.bs.modal', () => {
    t0 = performance.now()
  })
</script>
</body>
</html> | html | github | https://github.com/twbs/bootstrap | js/tests/visual/modal.html |
from itertools import izip
from django.db.backends.util import truncate_name, typecast_timestamp
from django.db.models.sql import compiler
from django.db.models.sql.constants import TABLE_NAME, MULTI
SQLCompiler = compiler.SQLCompiler
class GeoSQLCompiler(compiler.SQLCompiler):
    """
    SQL compiler that customizes column selection so geometry fields can be
    wrapped in backend-specific selection SQL (e.g. conversion to WKT on
    Oracle/MySQL, or SRID transformation).
    """

    def get_columns(self, with_aliases=False):
        """
        Return the list of columns to use in the select statement. If no
        columns have been specified, returns all columns relating to fields in
        the model.

        If 'with_aliases' is true, any column names that are duplicated
        (without the table names) are given unique aliases. This is needed in
        some cases to avoid ambiguitity with nested queries.

        This routine is overridden from Query to handle customized selection of
        geometry columns.
        """
        qn = self.quote_name_unless_alias
        qn2 = self.connection.ops.quote_name
        result = ['(%s) AS %s' % (self.get_extra_select_format(alias) % col[0], qn2(alias))
                  for alias, col in self.query.extra_select.iteritems()]
        aliases = set(self.query.extra_select.keys())
        if with_aliases:
            col_aliases = aliases.copy()
        else:
            col_aliases = set()
        if self.query.select:
            only_load = self.deferred_to_columns()
            # This loop customized for GeoQuery: get_field_select() wraps each
            # column in any custom/geometry selection SQL.
            for col, field in izip(self.query.select, self.query.select_fields):
                if isinstance(col, (list, tuple)):
                    alias, column = col
                    table = self.query.alias_map[alias][TABLE_NAME]
                    if table in only_load and column not in only_load[table]:
                        continue
                    r = self.get_field_select(field, alias, column)
                    if with_aliases:
                        if col[1] in col_aliases:
                            # Duplicated bare column name -- invent an alias.
                            c_alias = 'Col%d' % len(col_aliases)
                            result.append('%s AS %s' % (r, c_alias))
                            aliases.add(c_alias)
                            col_aliases.add(c_alias)
                        else:
                            result.append('%s AS %s' % (r, qn2(col[1])))
                            aliases.add(r)
                            col_aliases.add(col[1])
                    else:
                        result.append(r)
                        aliases.add(r)
                        col_aliases.add(col[1])
                else:
                    # Non-(alias, column) entries know how to render themselves.
                    result.append(col.as_sql(qn, self.connection))
                    if hasattr(col, 'alias'):
                        aliases.add(col.alias)
                        col_aliases.add(col.alias)
        elif self.query.default_cols:
            cols, new_aliases = self.get_default_columns(with_aliases,
                    col_aliases)
            result.extend(cols)
            aliases.update(new_aliases)
        max_name_length = self.connection.ops.max_name_length()
        result.extend([
            '%s%s' % (
                self.get_extra_select_format(alias) % aggregate.as_sql(qn, self.connection),
                alias is not None
                    and ' AS %s' % qn(truncate_name(alias, max_name_length))
                    or ''
            )
            for alias, aggregate in self.query.aggregate_select.items()
        ])
        # This loop customized for GeoQuery (select_related columns).
        for (table, col), field in izip(self.query.related_select_cols, self.query.related_select_fields):
            r = self.get_field_select(field, table, col)
            if with_aliases and col in col_aliases:
                c_alias = 'Col%d' % len(col_aliases)
                result.append('%s AS %s' % (r, c_alias))
                aliases.add(c_alias)
                col_aliases.add(c_alias)
            else:
                result.append(r)
                aliases.add(r)
                col_aliases.add(col)
        self._select_aliases = aliases
        return result

    def get_default_columns(self, with_aliases=False, col_aliases=None,
            start_alias=None, opts=None, as_pairs=False, local_only=False):
        """
        Computes the default columns for selecting every field in the base
        model. Will sometimes be called to pull in related models (e.g. via
        select_related), in which case "opts" and "start_alias" will be given
        to provide a starting point for the traversal.

        Returns a list of strings, quoted appropriately for use in SQL
        directly, as well as a set of aliases used in the select statement (if
        'as_pairs' is True, returns a list of (alias, col_name) pairs instead
        of strings as the first component and None as the second component).

        This routine is overridden from Query to handle customized selection of
        geometry columns.
        """
        result = []
        if opts is None:
            opts = self.query.model._meta
        aliases = set()
        only_load = self.deferred_to_columns()
        # Skip all proxy to the root proxied model
        proxied_model = opts.concrete_model

        if start_alias:
            seen = {None: start_alias}
        for field, model in opts.get_fields_with_model():
            if local_only and model is not None:
                continue
            if start_alias:
                try:
                    alias = seen[model]
                except KeyError:
                    # First time we see this parent model: join to it through
                    # the inheritance link and cache the resulting alias.
                    if model is proxied_model:
                        alias = start_alias
                    else:
                        link_field = opts.get_ancestor_link(model)
                        alias = self.query.join((start_alias, model._meta.db_table,
                                link_field.column, model._meta.pk.column))
                    seen[model] = alias
            else:
                # If we're starting from the base model of the queryset, the
                # aliases will have already been set up in pre_sql_setup(), so
                # we can save time here.
                alias = self.query.included_inherited_models[model]
            table = self.query.alias_map[alias][TABLE_NAME]
            if table in only_load and field.column not in only_load[table]:
                continue
            if as_pairs:
                result.append((alias, field.column))
                aliases.add(alias)
                continue
            # This part of the function is customized for GeoQuery. We
            # see if there was any custom selection specified in the
            # dictionary, and set up the selection format appropriately.
            field_sel = self.get_field_select(field, alias)
            if with_aliases and field.column in col_aliases:
                c_alias = 'Col%d' % len(col_aliases)
                result.append('%s AS %s' % (field_sel, c_alias))
                col_aliases.add(c_alias)
                aliases.add(c_alias)
            else:
                r = field_sel
                result.append(r)
                aliases.add(r)
                if with_aliases:
                    col_aliases.add(field.column)
        return result, aliases

    def resolve_columns(self, row, fields=()):
        """
        This routine is necessary so that distances and geometries returned
        from extra selection SQL get resolved appropriately into Python
        objects.
        """
        values = []
        aliases = self.query.extra_select.keys()

        # Have to set a starting row number offset that is used for
        # determining the correct starting row index -- needed for
        # doing pagination with Oracle.
        rn_offset = 0
        if self.connection.ops.oracle:
            if self.query.high_mark is not None or self.query.low_mark: rn_offset = 1
        index_start = rn_offset + len(aliases)

        # Converting any extra selection values (e.g., geometries and
        # distance objects added by GeoQuerySet methods).
        values = [self.query.convert_values(v,
                                            self.query.extra_select_fields.get(a, None),
                                            self.connection)
                  for v, a in izip(row[rn_offset:index_start], aliases)]
        if self.connection.ops.oracle or getattr(self.query, 'geo_values', False):
            # We resolve the rest of the columns if we're on Oracle or if
            # the `geo_values` attribute is defined.
            # map(None, ...) is the Python 2 zip_longest idiom: pairs values
            # with fields, padding the shorter sequence with None.
            for value, field in map(None, row[index_start:], fields):
                values.append(self.query.convert_values(value, field, self.connection))
        else:
            values.extend(row[index_start:])
        return tuple(values)

    #### Routines unique to GeoQuery ####
    def get_extra_select_format(self, alias):
        """
        Return the format string for an extra-select column; wraps it in the
        custom selection SQL registered for `alias`, if any.
        """
        sel_fmt = '%s'
        if hasattr(self.query, 'custom_select') and alias in self.query.custom_select:
            sel_fmt = sel_fmt % self.query.custom_select[alias]
        return sel_fmt

    def get_field_select(self, field, alias=None, column=None):
        """
        Returns the SELECT SQL string for the given field. Figures out
        if any custom selection SQL is needed for the column The `alias`
        keyword may be used to manually specify the database table where
        the column exists, if not in the model associated with this
        `GeoQuery`. Similarly, `column` may be used to specify the exact
        column name, rather than using the `column` attribute on `field`.
        """
        sel_fmt = self.get_select_format(field)
        if field in self.query.custom_select:
            field_sel = sel_fmt % self.query.custom_select[field]
        else:
            field_sel = sel_fmt % self._field_column(field, alias, column)
        return field_sel

    def get_select_format(self, fld):
        """
        Returns the selection format string, depending on the requirements
        of the spatial backend. For example, Oracle and MySQL require custom
        selection formats in order to retrieve geometries in OGC WKT. For all
        other fields a simple '%s' format string is returned.
        """
        if self.connection.ops.select and hasattr(fld, 'geom_type'):
            # This allows operations to be done on fields in the SELECT,
            # overriding their values -- used by the Oracle and MySQL
            # spatial backends to get database values as WKT, and by the
            # `transform` method.
            sel_fmt = self.connection.ops.select

            # Because WKT doesn't contain spatial reference information,
            # the SRID is prefixed to the returned WKT to ensure that the
            # transformed geometries have an SRID different than that of the
            # field -- this is only used by `transform` for Oracle and
            # SpatiaLite backends.
            if self.query.transformed_srid and (self.connection.ops.oracle or
                                                self.connection.ops.spatialite):
                sel_fmt = "'SRID=%d;'||%s" % (self.query.transformed_srid, sel_fmt)
        else:
            sel_fmt = '%s'
        return sel_fmt

    # Private API utilities, subject to change.
    def _field_column(self, field, table_alias=None, column=None):
        """
        Helper function that returns the database column for the given field.
        The table and column are returned (quoted) in the proper format, e.g.,
        `"geoapp_city"."point"`. If `table_alias` is not specified, the
        database table associated with the model of this `GeoQuery` will be
        used. If `column` is specified, it will be used instead of the value
        in `field.column`.
        """
        if table_alias is None: table_alias = self.query.model._meta.db_table
        return "%s.%s" % (self.quote_name_unless_alias(table_alias),
                          self.connection.ops.quote_name(column or field.column))
class SQLInsertCompiler(compiler.SQLInsertCompiler, GeoSQLCompiler):
    """Insert compiler with GeoDjango column-selection behavior mixed in."""
    pass
class SQLDeleteCompiler(compiler.SQLDeleteCompiler, GeoSQLCompiler):
    """Delete compiler with GeoDjango column-selection behavior mixed in."""
    pass
class SQLUpdateCompiler(compiler.SQLUpdateCompiler, GeoSQLCompiler):
    """Update compiler with GeoDjango column-selection behavior mixed in."""
    pass
class SQLAggregateCompiler(compiler.SQLAggregateCompiler, GeoSQLCompiler):
    """Aggregate compiler with GeoDjango column-selection behavior mixed in."""
    pass
class SQLDateCompiler(compiler.SQLDateCompiler, GeoSQLCompiler):
    """
    This is overridden for GeoDjango to properly cast date columns, since
    `GeoQuery.resolve_columns` is used for spatial values.
    See #14648, #16757.
    """
    def results_iter(self):
        # Yield one date per result row, converting per-backend as needed.
        if self.connection.ops.oracle:
            from django.db.models.fields import DateTimeField
            fields = [DateTimeField()]
        else:
            needs_string_cast = self.connection.features.needs_datetime_string_cast

        # Extra-select columns precede the date column in each row.
        offset = len(self.query.extra_select)
        for rows in self.execute_sql(MULTI):
            for row in rows:
                date = row[offset]
                if self.connection.ops.oracle:
                    # Oracle values must go through resolve_columns to become
                    # proper datetime objects.
                    date = self.resolve_columns(row, fields)[offset]
                elif needs_string_cast:
                    date = typecast_timestamp(str(date))
                yield date
# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
%YAML 1.2
---
$id: http://devicetree.org/schemas/clock/mediatek,mt6795-clock.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#
title: MediaTek Functional Clock Controller for MT6795
maintainers:
- AngeloGioacchino Del Regno <angelogioacchino.delregno@collabora.com>
- Chun-Jie Chen <chun-jie.chen@mediatek.com>
description: |
The clock architecture in MediaTek SoCs is as shown below:
PLLs -->
dividers -->
muxes
-->
clock gate
The devices provide clock gate control in different IP blocks.
properties:
compatible:
enum:
- mediatek,mt6795-mfgcfg
- mediatek,mt6795-vdecsys
- mediatek,mt6795-vencsys
reg:
maxItems: 1
'#clock-cells':
const: 1
required:
- compatible
- reg
- '#clock-cells'
additionalProperties: false
examples:
- |
soc {
#address-cells = <2>;
#size-cells = <2>;
mfgcfg: clock-controller@13000000 {
compatible = "mediatek,mt6795-mfgcfg";
reg = <0 0x13000000 0 0x1000>;
#clock-cells = <1>;
};
vdecsys: clock-controller@16000000 {
compatible = "mediatek,mt6795-vdecsys";
reg = <0 0x16000000 0 0x1000>;
#clock-cells = <1>;
};
vencsys: clock-controller@18000000 {
compatible = "mediatek,mt6795-vencsys";
reg = <0 0x18000000 0 0x1000>;
#clock-cells = <1>;
};
}; | unknown | github | https://github.com/torvalds/linux | Documentation/devicetree/bindings/clock/mediatek,mt6795-clock.yaml |
// Copyright IBM Corp. 2016, 2025
// SPDX-License-Identifier: BUSL-1.1
package agent
import (
"context"
"fmt"
"net"
"net/http"
"os"
"testing"
"time"
"github.com/hashicorp/go-hclog"
log "github.com/hashicorp/go-hclog"
"github.com/hashicorp/vault/api"
credAppRole "github.com/hashicorp/vault/builtin/credential/approle"
"github.com/hashicorp/vault/command/agentproxyshared/auth"
agentapprole "github.com/hashicorp/vault/command/agentproxyshared/auth/approle"
"github.com/hashicorp/vault/command/agentproxyshared/cache"
"github.com/hashicorp/vault/command/agentproxyshared/sink"
"github.com/hashicorp/vault/command/agentproxyshared/sink/file"
"github.com/hashicorp/vault/command/agentproxyshared/sink/inmem"
"github.com/hashicorp/vault/helper/useragent"
vaulthttp "github.com/hashicorp/vault/http"
"github.com/hashicorp/vault/sdk/helper/consts"
"github.com/hashicorp/vault/sdk/helper/logging"
"github.com/hashicorp/vault/sdk/logical"
"github.com/hashicorp/vault/vault"
)
// policyAutoAuthAppRole grants the test's auto-auth token full access to the
// kv mount and permission to create child tokens.
const policyAutoAuthAppRole = `
path "/kv/*" {
	capabilities = ["sudo", "create", "read", "update", "delete", "list"]
}
path "/auth/token/create" {
	capabilities = ["create", "update"]
}
`
// TestCache_UsingAutoAuthToken stands up a test Vault cluster plus an agent
// cache/auto-auth pipeline, and verifies that: a request without a token uses
// the auto-auth token; lease and token-creation responses are cached; and an
// explicit token on the request takes priority over the auto-auth token.
func TestCache_UsingAutoAuthToken(t *testing.T) {
	var err error
	logger := logging.NewVaultLogger(log.Trace)
	coreConfig := &vault.CoreConfig{
		LogicalBackends: map[string]logical.Factory{
			"kv": vault.LeasedPassthroughBackendFactory,
		},
		CredentialBackends: map[string]logical.Factory{
			"approle": credAppRole.Factory,
		},
	}

	cluster := vault.NewTestCluster(t, coreConfig, &vault.TestClusterOptions{
		HandlerFunc: vaulthttp.Handler,
	})
	cluster.Start()
	defer cluster.Cleanup()

	cores := cluster.Cores
	vault.TestWaitActive(t, cores[0].Core)
	client := cores[0].Client

	// Point the environment at the test cluster; restore prior values on exit.
	defer os.Setenv(api.EnvVaultAddress, os.Getenv(api.EnvVaultAddress))
	os.Setenv(api.EnvVaultAddress, client.Address())

	defer os.Setenv(api.EnvVaultCACert, os.Getenv(api.EnvVaultCACert))
	os.Setenv(api.EnvVaultCACert, fmt.Sprintf("%s/ca_cert.pem", cluster.TempDir))

	err = client.Sys().Mount("kv", &api.MountInput{
		Type: "kv",
	})
	if err != nil {
		t.Fatal(err)
	}

	// Create a secret in the backend
	_, err = client.Logical().Write("kv/foo", map[string]interface{}{
		"value": "bar",
		"ttl":   "1h",
	})
	if err != nil {
		t.Fatal(err)
	}

	// Add an kv-admin policy
	if err := client.Sys().PutPolicy("test-autoauth", policyAutoAuthAppRole); err != nil {
		t.Fatal(err)
	}

	// Enable approle
	err = client.Sys().EnableAuthWithOptions("approle", &api.EnableAuthOptions{
		Type: "approle",
	})
	if err != nil {
		t.Fatal(err)
	}

	// Short token TTLs so auto-auth renewal logic is exercised by the test.
	_, err = client.Logical().Write("auth/approle/role/test1", map[string]interface{}{
		"bind_secret_id": "true",
		"token_ttl":      "3s",
		"token_max_ttl":  "10s",
		"policies":       []string{"test-autoauth"},
	})
	if err != nil {
		t.Fatal(err)
	}

	resp, err := client.Logical().Write("auth/approle/role/test1/secret-id", nil)
	if err != nil {
		t.Fatal(err)
	}
	secretID1 := resp.Data["secret_id"].(string)

	resp, err = client.Logical().Read("auth/approle/role/test1/role-id")
	if err != nil {
		t.Fatal(err)
	}
	roleID1 := resp.Data["role_id"].(string)

	// Temp files that the approle auto-auth method will read credentials from.
	rolef, err := os.CreateTemp("", "auth.role-id.test.")
	if err != nil {
		t.Fatal(err)
	}
	role := rolef.Name()
	rolef.Close() // WriteFile doesn't need it open
	defer os.Remove(role)
	t.Logf("input role_id_file_path: %s", role)

	secretf, err := os.CreateTemp("", "auth.secret-id.test.")
	if err != nil {
		t.Fatal(err)
	}
	secret := secretf.Name()
	secretf.Close()
	defer os.Remove(secret)
	t.Logf("input secret_id_file_path: %s", secret)

	// We close these right away because we're just basically testing
	// permissions and finding a usable file name
	ouf, err := os.CreateTemp("", "auth.tokensink.test.")
	if err != nil {
		t.Fatal(err)
	}
	out := ouf.Name()
	ouf.Close()
	os.Remove(out)
	t.Logf("output: %s", out)

	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)

	conf := map[string]interface{}{
		"role_id_file_path":                   role,
		"secret_id_file_path":                 secret,
		"remove_secret_id_file_after_reading": true,
	}

	cacheLogger := logging.NewVaultLogger(hclog.Trace).Named("cache")

	// Create the API proxier
	apiProxy, err := cache.NewAPIProxy(&cache.APIProxyConfig{
		Client:                  client,
		Logger:                  cacheLogger.Named("apiproxy"),
		UserAgentStringFunction: useragent.ProxyStringWithProxiedUserAgent,
		UserAgentString:         useragent.ProxyAPIProxyString(),
	})
	if err != nil {
		t.Fatal(err)
	}

	// Create the lease cache proxier and set its underlying proxier to
	// the API proxier.
	leaseCache, err := cache.NewLeaseCache(&cache.LeaseCacheConfig{
		Client:              client,
		BaseContext:         ctx,
		Proxier:             apiProxy,
		Logger:              cacheLogger.Named("leasecache"),
		CacheDynamicSecrets: true,
		UserAgentToUse:      "test",
	})
	if err != nil {
		t.Fatal(err)
	}

	am, err := agentapprole.NewApproleAuthMethod(&auth.AuthConfig{
		Logger:    logger.Named("auth.approle"),
		MountPath: "auth/approle",
		Config:    conf,
	})
	if err != nil {
		t.Fatal(err)
	}

	ahConfig := &auth.AuthHandlerConfig{
		Logger: logger.Named("auth.handler"),
		Client: client,
	}
	ah := auth.NewAuthHandler(ahConfig)
	errCh := make(chan error)
	go func() {
		errCh <- ah.Run(ctx, am)
	}()
	defer func() {
		select {
		case <-ctx.Done():
		case err := <-errCh:
			if err != nil {
				t.Fatal(err)
			}
		}
	}()

	// File sink that auto-auth writes the obtained token to.
	config := &sink.SinkConfig{
		Logger: logger.Named("sink.file"),
		Config: map[string]interface{}{
			"path": out,
		},
	}

	fs, err := file.NewFileSink(config)
	if err != nil {
		t.Fatal(err)
	}
	config.Sink = fs

	ss := sink.NewSinkServer(&sink.SinkServerConfig{
		Logger: logger.Named("sink.server"),
		Client: client,
	})

	// In-memory sink feeding the lease cache with the auto-auth token.
	inmemSinkConfig := &sink.SinkConfig{
		Logger: logger.Named("sink.inmem"),
	}

	inmemSink, err := inmem.New(inmemSinkConfig, leaseCache)
	if err != nil {
		t.Fatal(err)
	}
	inmemSinkConfig.Sink = inmemSink

	go func() {
		errCh <- ss.Run(ctx, ah.OutputCh, []*sink.SinkConfig{config, inmemSinkConfig}, ah.AuthInProgress)
	}()
	defer func() {
		select {
		case <-ctx.Done():
		case err := <-errCh:
			if err != nil {
				t.Fatal(err)
			}
		}
	}()

	// This has to be after the other defers so it happens first. It allows
	// successful test runs to immediately cancel all of the runner goroutines
	// and unblock any of the blocking defer calls by the runner's DoneCh that
	// comes before this and avoid successful tests from taking the entire
	// timeout duration.
	defer cancel()

	// Check that no sink file exists
	_, err = os.Lstat(out)
	if err == nil {
		t.Fatal("expected err")
	}
	if !os.IsNotExist(err) {
		t.Fatal("expected notexist err")
	}

	if err := os.WriteFile(role, []byte(roleID1), 0o600); err != nil {
		t.Fatal(err)
	} else {
		logger.Trace("wrote test role 1", "path", role)
	}

	if err := os.WriteFile(secret, []byte(secretID1), 0o600); err != nil {
		t.Fatal(err)
	} else {
		logger.Trace("wrote test secret 1", "path", secret)
	}

	// getToken polls until auto-auth has written a token to the file sink,
	// also verifying the secret-id file was removed after being read.
	getToken := func() string {
		timeout := time.Now().Add(10 * time.Second)
		for {
			if time.Now().After(timeout) {
				t.Fatal("did not find a written token after timeout")
			}
			val, err := os.ReadFile(out)
			if err == nil {
				os.Remove(out)
				if len(val) == 0 {
					t.Fatal("written token was empty")
				}
				_, err = os.Stat(secret)
				if err == nil {
					t.Fatal("secret file exists but was supposed to be removed")
				}
				return string(val)
			}
			time.Sleep(250 * time.Millisecond)
		}
	}

	t.Logf("auto-auth token: %q", getToken())

	listener, err := net.Listen("tcp", "127.0.0.1:0")
	if err != nil {
		t.Fatal(err)
	}
	defer listener.Close()

	// Create a muxer and add paths relevant for the lease cache layer
	mux := http.NewServeMux()
	mux.Handle(consts.AgentPathCacheClear, leaseCache.HandleCacheClear(ctx))

	// Setting useAutoAuthToken to true to ensure that the auto-auth token is used
	mux.Handle("/", cache.ProxyHandler(ctx, cacheLogger, leaseCache, inmemSink, false, true, nil, nil))
	server := &http.Server{
		Handler:           mux,
		ReadHeaderTimeout: 10 * time.Second,
		ReadTimeout:       30 * time.Second,
		IdleTimeout:       5 * time.Minute,
		ErrorLog:          cacheLogger.StandardLogger(nil),
	}
	go server.Serve(listener)

	testClient, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		t.Fatal(err)
	}

	if err := testClient.SetAddress("http://" + listener.Addr().String()); err != nil {
		t.Fatal(err)
	}

	// Wait for listeners to come up
	time.Sleep(2 * time.Second)

	// This block tests that no token on the client is detected by the agent
	// and the auto-auth token is used
	{
		// Empty the token in the client to ensure that auto-auth token is used
		testClient.SetToken("")

		resp, err = testClient.Logical().Read("auth/token/lookup-self")
		if err != nil {
			t.Fatal(err)
		}
		if resp == nil {
			t.Fatalf("failed to use the auto-auth token to perform lookup-self")
		}
	}

	// This block tests lease creation caching using the auto-auth token.
	{
		resp, err = testClient.Logical().Read("kv/foo")
		if err != nil {
			t.Fatal(err)
		}

		origReqID := resp.RequestID

		resp, err = testClient.Logical().Read("kv/foo")
		if err != nil {
			t.Fatal(err)
		}

		// Sleep for a bit to allow renewer logic to kick in
		time.Sleep(20 * time.Millisecond)

		cacheReqID := resp.RequestID

		// Identical request IDs prove the second response came from the cache.
		if origReqID != cacheReqID {
			t.Fatalf("request ID mismatch, expected second request to be a cached response: %s != %s", origReqID, cacheReqID)
		}
	}

	// This block tests auth token creation caching (child, non-orphan tokens)
	// using the auto-auth token.
	{
		resp, err = testClient.Logical().Write("auth/token/create", nil)
		if err != nil {
			t.Fatal(err)
		}
		origReqID := resp.RequestID

		// Sleep for a bit to allow renewer logic to kick in
		time.Sleep(20 * time.Millisecond)

		resp, err = testClient.Logical().Write("auth/token/create", nil)
		if err != nil {
			t.Fatal(err)
		}
		cacheReqID := resp.RequestID

		if origReqID != cacheReqID {
			t.Fatalf("request ID mismatch, expected second request to be a cached response: %s != %s", origReqID, cacheReqID)
		}
	}

	// This blocks tests that despite being allowed to use auto-auth token, the
	// token on the request will be prioritized.
	{
		// Empty the token in the client to ensure that auto-auth token is used
		testClient.SetToken(client.Token())

		resp, err = testClient.Logical().Read("auth/token/lookup-self")
		if err != nil {
			t.Fatal(err)
		}
		if resp == nil || resp.Data["id"] != client.Token() {
			t.Fatalf("failed to use the cluster client token to perform lookup-self")
		}
	}
}
# vim: ft=python fileencoding=utf-8 sts=4 sw=4 et:
# Copyright 2016-2018 Florian Bruhin (The Compiler) <mail@qutebrowser.org>
#
# This file is part of qutebrowser.
#
# qutebrowser is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# qutebrowser is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with qutebrowser. If not, see <http://www.gnu.org/licenses/>.
"""Implementation of :navigate."""
import posixpath
from qutebrowser.browser import webelem
from qutebrowser.config import config
from qutebrowser.utils import objreg, urlutils, log, message, qtutils
from qutebrowser.mainwindow import mainwindow
class Error(Exception):

    """Raised when the requested :navigate action can't be performed."""
def incdec(url, count, inc_or_dec):
    """Helper method for :navigate when `where' is increment/decrement.

    Args:
        url: The current url.
        count: How much to increment or decrement by.
        inc_or_dec: Either 'increment' or 'decrement'.

    Return:
        The new URL with the number adjusted.

    Raises:
        Error: If the number in the URL couldn't be incremented/decremented.
    """
    segments = set(config.val.url.incdec_segments)
    try:
        new_url = urlutils.incdec_number(url, inc_or_dec, count,
                                         segments=segments)
    except urlutils.IncDecError as error:
        # Chain the original exception so tracebacks show the real cause.
        raise Error(error.msg) from error
    return new_url
def path_up(url, count):
    """Go up `count` levels in the URL's path for :navigate.

    Args:
        url: The current url.
        count: The number of levels to go up in the url.

    Return:
        The modified url.

    Raises:
        Error: If the path is empty or already at the root.
    """
    current = url.path()
    if not current or current == '/':
        raise Error("Can't go up!")
    # Never go up more levels than the path actually has.
    levels = min(count, current.count('/'))
    for _ in range(levels):
        current = posixpath.join(current, posixpath.pardir)
    url.setPath(posixpath.normpath(current))
    return url
def _find_prevnext(prev, elems):
    """Find a prev/next element in the given list of elements.

    Args:
        prev: True to look for a "previous" link, False for a "next" one.
        elems: The web elements to search.

    Return:
        The matching element, or None if no candidate was found.
    """
    # First check for <link rel="prev(ious)|next">
    rel_values = {'prev', 'previous'} if prev else {'next'}
    for e in elems:
        if e.tag_name() not in ['link', 'a'] or 'rel' not in e:
            continue
        if set(e['rel'].split(' ')) & rel_values:
            log.hints.debug("Found {!r} with rel={}".format(e, e['rel']))
            return e

    # Then check for regular links/buttons.
    elems = [e for e in elems if e.tag_name() != 'link']
    option = 'prev_regexes' if prev else 'next_regexes'
    if not elems:
        return None

    # pylint: disable=bad-config-option
    for regex in getattr(config.val.hints, option):
        # pylint: enable=bad-config-option
        log.hints.vdebug("== Checking regex '{}'.".format(regex.pattern))
        for e in elems:
            text = str(e)
            if not text:
                continue
            if regex.search(text):
                log.hints.debug("Regex '{}' matched on '{}'.".format(
                    regex.pattern, text))
                return e
            else:
                log.hints.vdebug("No match on '{}'!".format(text))
    return None
def prevnext(*, browsertab, win_id, baseurl, prev=False,
             tab=False, background=False, window=False):
    """Click a "previous"/"next" element on the page.

    Args:
        browsertab: The WebKitTab/WebEngineTab of the page.
        win_id: The ID of the current window.
        baseurl: The base URL of the current tab.
        prev: True to open a "previous" link, False to open a "next" link.
        tab: True to open in a new tab, False for the current tab.
        background: True to open in a background tab.
        window: True to open in a new window, False for the current one.
    """
    def _prevnext_cb(elems):
        # Called asynchronously with the link elements found on the page.
        if elems is None:
            message.error("There was an error while getting hint elements")
            return
        elem = _find_prevnext(prev, elems)
        word = 'prev' if prev else 'forward'
        if elem is None:
            message.error("No {} links found!".format(word))
            return
        url = elem.resolve_url(baseurl)
        if url is None:
            message.error("No {} links found!".format(word))
            return
        qtutils.ensure_valid(url)

        cur_tabbed_browser = objreg.get('tabbed-browser', scope='window',
                                        window=win_id)
        if window:
            # Open in a fresh window, inheriting the private-browsing state.
            new_window = mainwindow.MainWindow(
                private=cur_tabbed_browser.private)
            new_window.show()
            tabbed_browser = objreg.get('tabbed-browser', scope='window',
                                        window=new_window.win_id)
            tabbed_browser.tabopen(url, background=False)
        elif tab:
            cur_tabbed_browser.tabopen(url, background=background)
        else:
            browsertab.openurl(url)

    browsertab.elements.find_css(webelem.SELECTORS[webelem.Group.links],
                                 _prevnext_cb)
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
/*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by deepcopy-gen. DO NOT EDIT.
package abac
import (
runtime "k8s.io/apimachinery/pkg/runtime"
)
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Policy) DeepCopyInto(out *Policy) {
	*out = *in
	// Value assignments suffice here; the generator emitted no per-field
	// deep copies, so these fields carry no reference types.
	out.TypeMeta = in.TypeMeta
	out.Spec = in.Spec
	return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Policy.
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Policy.
// A nil receiver yields a nil result.
func (in *Policy) DeepCopy() *Policy {
	if in == nil {
		return nil
	}
	out := new(Policy)
	in.DeepCopyInto(out)
	return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *Policy) DeepCopyObject() runtime.Object {
	if c := in.DeepCopy(); c != nil {
		return c
	}
	// Return an untyped nil so the runtime.Object interface value is nil,
	// rather than a non-nil interface wrapping a typed nil pointer.
	return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
// A plain dereference copy is sufficient: the generator found no reference fields.
func (in *PolicySpec) DeepCopyInto(out *PolicySpec) {
	*out = *in
	return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PolicySpec.
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PolicySpec.
// A nil receiver yields a nil result.
func (in *PolicySpec) DeepCopy() *PolicySpec {
	if in == nil {
		return nil
	}
	out := new(PolicySpec)
	in.DeepCopyInto(out)
	return out
}
/*
* Copyright 2010-2025 JetBrains s.r.o. and Kotlin Programming Language contributors.
* Use of this source code is governed by the Apache 2.0 license that can be found in the license/LICENSE.txt file.
*/
package org.jetbrains.kotlin.analysis.api.fir
import org.jetbrains.kotlin.KtFakeSourceElementKind
import org.jetbrains.kotlin.analysis.api.annotations.KaAnnotation
import org.jetbrains.kotlin.analysis.api.fir.annotations.computeAnnotationArguments
import org.jetbrains.kotlin.analysis.api.impl.base.annotations.KaAnnotationImpl
import org.jetbrains.kotlin.analysis.api.symbols.KaSymbol
import org.jetbrains.kotlin.analysis.low.level.api.fir.sessions.LLFirSession
import org.jetbrains.kotlin.builtins.StandardNames
import org.jetbrains.kotlin.descriptors.annotations.AnnotationUseSiteTarget
import org.jetbrains.kotlin.fir.FirAnnotationContainer
import org.jetbrains.kotlin.fir.FirElement
import org.jetbrains.kotlin.fir.FirSession
import org.jetbrains.kotlin.fir.declarations.getAnnotationsByClassId
import org.jetbrains.kotlin.fir.declarations.getStringArgument
import org.jetbrains.kotlin.fir.declarations.toAnnotationClassId
import org.jetbrains.kotlin.fir.diagnostics.ConeDiagnostic
import org.jetbrains.kotlin.fir.diagnostics.ConeUnreportedDuplicateDiagnostic
import org.jetbrains.kotlin.fir.expressions.*
import org.jetbrains.kotlin.fir.psi
import org.jetbrains.kotlin.fir.references.*
import org.jetbrains.kotlin.fir.resolve.diagnostics.ConeDiagnosticWithCandidates
import org.jetbrains.kotlin.fir.resolve.diagnostics.ConeDiagnosticWithSymbol
import org.jetbrains.kotlin.fir.resolve.diagnostics.ConeHiddenCandidateError
import org.jetbrains.kotlin.fir.resolve.toClassSymbol
import org.jetbrains.kotlin.fir.scopes.getDeclaredConstructors
import org.jetbrains.kotlin.fir.scopes.unsubstitutedScope
import org.jetbrains.kotlin.fir.symbols.FirBasedSymbol
import org.jetbrains.kotlin.fir.symbols.impl.FirConstructorSymbol
import org.jetbrains.kotlin.fir.symbols.impl.FirNamedFunctionSymbol
import org.jetbrains.kotlin.name.JvmStandardClassIds
import org.jetbrains.kotlin.psi.KtCallElement
import org.jetbrains.kotlin.util.OperatorNameConventions
/**
 * Returns `true` if the symbol is for a function named `invoke`.
 * Non-function symbols yield `false`.
 */
internal fun FirBasedSymbol<*>.isInvokeFunction() =
    (this as? FirNamedFunctionSymbol)?.fir?.name == OperatorNameConventions.INVOKE
/** Returns the resolved symbol of this call's callee reference, or `null` if unresolved. */
internal fun FirFunctionCall.getCalleeSymbol(): FirBasedSymbol<*>? =
    calleeReference.getResolvedSymbolOfNameReference()
/** Returns the candidate symbols considered for this call's callee reference. */
internal fun FirFunctionCall.getCandidateSymbols(): Collection<FirBasedSymbol<*>> =
    calleeReference.getCandidateSymbols()
/** Returns the resolved symbol if this is a [FirResolvedNamedReference], else `null`. */
internal fun FirReference.getResolvedSymbolOfNameReference(): FirBasedSymbol<*>? =
    (this as? FirResolvedNamedReference)?.resolvedSymbol
/** Returns the resolved symbol converted to a [KaSymbol] via [builder], or `null` if unresolved. */
internal fun FirReference.getResolvedKtSymbolOfNameReference(builder: KaSymbolByFirBuilder): KaSymbol? =
    getResolvedSymbolOfNameReference()?.fir?.let(builder::buildSymbol)
/** Returns the candidate symbols carried by this error reference's diagnostic. */
internal fun FirErrorNamedReference.getCandidateSymbols(): Collection<FirBasedSymbol<*>> =
    diagnostic.getCandidateSymbols()
/** Returns candidate symbols for this reference: the resolved symbol, error candidates, or none. */
internal fun FirNamedReference.getCandidateSymbols(): Collection<FirBasedSymbol<*>> = when (this) {
    is FirResolvedNamedReference -> listOf(resolvedSymbol)
    is FirErrorNamedReference -> getCandidateSymbols()
    else -> emptyList()
}
/** Extracts the candidate symbols carried by a resolution diagnostic, if any. */
internal fun ConeDiagnostic.getCandidateSymbols(): Collection<FirBasedSymbol<*>> =
    when (this) {
        is ConeHiddenCandidateError -> {
            // Candidate with @Deprecated(DeprecationLevel.HIDDEN)
            emptyList()
        }
        is ConeDiagnosticWithCandidates -> candidateSymbols
        is ConeDiagnosticWithSymbol<*> -> listOf(symbol)
        // Duplicate diagnostics delegate to the original they wrap.
        is ConeUnreportedDuplicateDiagnostic -> original.getCandidateSymbols()
        else -> emptyList()
    }
/** Converts this FIR annotation into the analysis API's [KaAnnotation] representation. */
internal fun FirAnnotation.toKaAnnotation(builder: KaSymbolByFirBuilder): KaAnnotation {
    val constructorSymbol = findAnnotationConstructor(this, builder.rootSession)
        ?.let(builder.functionBuilder::buildConstructorSymbol)

    val classId = toAnnotationClassId(builder.rootSession)
    return KaAnnotationImpl(
        classId = classId,
        psi = psi as? KtCallElement,
        useSiteTarget = useSiteTarget,
        // Arguments are mapped lazily; an annotation call with no arguments
        // gets an eagerly-known empty list instead.
        lazyArguments = if (this !is FirAnnotationCall || arguments.isNotEmpty())
            lazy { computeAnnotationArguments(this, builder) }
        else
            lazyOf(emptyList()),
        constructorSymbol = constructorSymbol,
        token = builder.token,
    )
}
/**
 * Finds the constructor symbol of [annotation]'s annotation class: directly from a resolved
 * call when possible, otherwise by looking it up in the class's declared scope.
 */
private fun findAnnotationConstructor(annotation: FirAnnotation, session: LLFirSession): FirConstructorSymbol? {
    if (annotation is FirAnnotationCall) {
        val constructorSymbol = annotation.calleeReference.toResolvedConstructorSymbol()
        if (constructorSymbol != null) {
            return constructorSymbol
        }
    }

    // Handle unresolved annotation calls gracefully
    @OptIn(UnresolvedExpressionTypeAccess::class)
    val annotationClass = annotation.coneTypeOrNull?.toClassSymbol(session)?.fir ?: return null

    // The search is done via scope to force Java enhancement. Annotation class might be a 'FirJavaClass'
    return annotationClass
        .unsubstitutedScope(session, session.getScopeSession(), withForcedTypeCalculator = false, memberRequiredPhase = null)
        .getDeclaredConstructors()
        .singleOrNull()
}
/**
 * Implicit dispatch receiver is present when an extension function declared in object
 * is imported somewhere else and used without directly referencing the object instance
 * itself:
 *
 * ```kt
 * import Foo.bar
 *
 * object Foo { fun String.bar() {} }
 *
 * fun usage() {
 *     "hello".bar() // this call has implicit 'Foo' dispatch receiver
 * }
 * ```
 *
 * Such a receiver is compiler-generated, which is what the
 * [KtFakeSourceElementKind.ImplicitReceiver] source kind check detects.
 */
internal val FirResolvedQualifier.isImplicitDispatchReceiver: Boolean
    get() = source?.kind == KtFakeSourceElementKind.ImplicitReceiver
/**
 * Returns the name supplied by a `@JvmName` annotation on this declaration, optionally
 * restricted to a specific use-site [target]; `null` when no matching annotation
 * provides a name.
 */
internal fun FirAnnotationContainer.getJvmNameFromAnnotation(session: FirSession, target: AnnotationUseSiteTarget? = null): String? {
    val jvmNameAnnotations = getAnnotationsByClassId(JvmStandardClassIds.Annotations.JvmName, session)
    for (annotationCall in jvmNameAnnotations) {
        // Skip annotations placed on a different use-site target.
        if (target != null && annotationCall.useSiteTarget != target) continue
        val name = annotationCall.getStringArgument(StandardNames.NAME)
        if (name != null) return name
    }
    return null
}
/** Returns the selector of a safe call (`a?.b` -> `b`); any other element is returned as-is. */
internal fun FirElement.unwrapSafeCall(): FirElement =
    (this as? FirSafeCallExpression)?.selector ?: this
---
c: Copyright (C) Daniel Stenberg, <daniel@haxx.se>, et al.
SPDX-License-Identifier: curl
Long: sasl-ir
Help: Initial response in SASL authentication
Protocols: LDAP IMAP POP3 SMTP
Added: 7.31.0
Category: auth
Multi: boolean
See-also:
- sasl-authzid
Example:
- --sasl-ir imap://example.com/
---
# `--sasl-ir`
Enable initial response in SASL authentication. Such an "initial response" is
a message sent by the client to the server after the client selects an
authentication mechanism. | unknown | github | https://github.com/curl/curl | docs/cmdline-opts/sasl-ir.md |
# -*- coding: utf-8 -*-
#
# Copyright The Plasma Project.
# See LICENSE.txt for details.
"""
Flex Messaging compatibility tests.
.. versionadded:: 0.1
"""
import unittest
import datetime
import uuid
import pyamf
from plasma.flex.messaging import messages
class AbstractMessageTestCase(unittest.TestCase):
    """
    Tests for :class:`messages.AbstractMessage`
    """

    def test_create(self):
        """A freshly created message has every attribute unset."""
        a = messages.AbstractMessage()

        self.assertEqual(a.body, None)
        self.assertEqual(a.timestamp, None)
        self.assertEqual(a.destination, None)
        self.assertEqual(a.clientId, None)
        self.assertEqual(a.headers, {})
        self.assertEqual(a.timeToLive, None)
        self.assertEqual(a.messageId, None)

    def test_kwargs(self):
        """Known keyword args are applied; unknown ones are silently dropped."""
        a = messages.AbstractMessage(body=[], timestamp='foo', clientId='baz',
            destination='bar', headers='gak', timeToLive='spam',
            messageId='eggs', python='cool')

        self.assertEqual(a.body, [])
        self.assertEqual(a.timestamp, 'foo')
        self.assertEqual(a.destination, 'bar')
        self.assertEqual(a.clientId, 'baz')
        self.assertEqual(a.headers, 'gak')
        self.assertEqual(a.timeToLive, 'spam')
        self.assertEqual(a.messageId, 'eggs')
        self.assertFalse(hasattr(a, 'python'))

    def test_repr(self):
        """repr() escapes non-ASCII body content."""
        a = messages.AbstractMessage()
        a.body = u'é,è'

        self.assertEqual(repr(a), "<AbstractMessage body=u'\\xe9,\\xe8' "
            "clientId=None destination=None headers={} messageId=None "
            "timestamp=None timeToLive=None />")
class AsyncMessageTestCase(unittest.TestCase):
    """
    Tests for :class:`messages.AsyncMessage`
    """

    def test_init(self):
        """A freshly created message has every attribute unset."""
        a = messages.AsyncMessage()

        self.assertEqual(a.body, None)
        self.assertEqual(a.timestamp, None)
        self.assertEqual(a.destination, None)
        self.assertEqual(a.clientId, None)
        self.assertEqual(a.headers, {})
        self.assertEqual(a.timeToLive, None)
        self.assertEqual(a.messageId, None)
        self.assertEqual(a.correlationId, None)

    def test_alias(self):
        """The pyamf class alias is sealed, static and non-external."""
        alias = pyamf.get_class_alias(messages.AsyncMessage)
        alias.compile()

        self.assertTrue(alias.sealed)
        self.assertFalse(alias.dynamic)
        self.assertFalse(alias.external)
        self.assertEqual(alias.static_attrs, ['body', 'clientId',
            'correlationId', 'destination', 'headers', 'messageId',
            'timeToLive', 'timestamp'])
class AcknowledgeMessageTestCase(unittest.TestCase):
    """
    Tests for :class:`messages.AcknowledgeMessage`
    """

    def test_init(self):
        """A freshly created message has every attribute unset."""
        a = messages.AcknowledgeMessage()

        self.assertEqual(a.body, None)
        self.assertEqual(a.timestamp, None)
        self.assertEqual(a.destination, None)
        self.assertEqual(a.clientId, None)
        self.assertEqual(a.headers, {})
        self.assertEqual(a.timeToLive, None)
        self.assertEqual(a.messageId, None)
        self.assertEqual(a.correlationId, None)

    def test_kwargs(self):
        """Unknown keyword args are silently dropped."""
        a = messages.AcknowledgeMessage(foo='bar')

        self.assertFalse(hasattr(a, 'foo'))

    def test_alias(self):
        """The pyamf class alias is sealed, static and non-external."""
        alias = pyamf.get_class_alias(messages.AcknowledgeMessage)
        alias.compile()

        self.assertTrue(alias.sealed)
        self.assertFalse(alias.dynamic)
        self.assertFalse(alias.external)
        self.assertEqual(alias.static_attrs, ['body', 'clientId',
            'correlationId', 'destination', 'headers', 'messageId',
            'timeToLive', 'timestamp'])
class CommandMessageTestCase(unittest.TestCase):
    """
    Tests for :class:`messages.CommandMessage`
    """

    def test_init(self):
        """A new command message defaults to the 'unknown' operation (10000)."""
        a = messages.CommandMessage()

        self.assertEqual(a.body, None)
        self.assertEqual(a.timestamp, None)
        self.assertEqual(a.destination, None)
        self.assertEqual(a.clientId, None)
        self.assertEqual(a.headers, {})
        self.assertEqual(a.timeToLive, None)
        self.assertEqual(a.messageId, None)
        self.assertEqual(a.correlationId, None)
        self.assertEqual(a.operation, 10000)

    def test_kwargs(self):
        """`operation` is applied; unknown keyword args are dropped."""
        a = messages.CommandMessage(operation='yippee', foo='bar')

        self.assertEqual(a.operation, 'yippee')
        self.assertFalse(hasattr(a, 'foo'))

    def test_alias(self):
        """The pyamf class alias is sealed, static and non-external."""
        alias = pyamf.get_class_alias(messages.CommandMessage)
        alias.compile()

        self.assertTrue(alias.sealed)
        self.assertFalse(alias.dynamic)
        self.assertFalse(alias.external)
        self.assertEqual(alias.static_attrs, ['body', 'clientId',
            'correlationId', 'destination', 'headers', 'messageId',
            'operation', 'timeToLive', 'timestamp'])
class ErrorMessageTestCase(unittest.TestCase):
    """
    Tests for :class:`messages.ErrorMessage`
    """

    def test_init(self):
        """A freshly created error message has every attribute unset."""
        a = messages.ErrorMessage()

        self.assertEqual(a.body, None)
        self.assertEqual(a.timestamp, None)
        self.assertEqual(a.destination, None)
        self.assertEqual(a.clientId, None)
        self.assertEqual(a.headers, {})
        self.assertEqual(a.timeToLive, None)
        self.assertEqual(a.messageId, None)
        self.assertEqual(a.correlationId, None)
        self.assertEqual(a.extendedData, {})
        self.assertEqual(a.faultCode, None)
        self.assertEqual(a.faultDetail, None)
        self.assertEqual(a.faultString, None)
        self.assertEqual(a.rootCause, {})

    def test_kwargs(self):
        """Fault-related keyword args are applied; unknown ones are dropped."""
        a = messages.ErrorMessage(extendedData='foo', faultCode='bar',
            faultDetail='baz', faultString='gak', rootCause='spam', foo='bar')

        self.assertEqual(a.extendedData, 'foo')
        self.assertEqual(a.faultCode, 'bar')
        self.assertEqual(a.faultDetail, 'baz')
        self.assertEqual(a.faultString, 'gak')
        self.assertEqual(a.rootCause, 'spam')
        self.assertFalse(hasattr(a, 'foo'))

    def test_alias(self):
        """The pyamf class alias is sealed, static and non-external."""
        alias = pyamf.get_class_alias(messages.ErrorMessage)
        alias.compile()

        self.assertTrue(alias.sealed)
        self.assertFalse(alias.dynamic)
        self.assertFalse(alias.external)
        self.assertEqual(alias.static_attrs, ['body', 'clientId',
            'correlationId', 'destination', 'extendedData', 'faultCode',
            'faultDetail', 'faultString', 'headers', 'messageId', 'rootCause',
            'timeToLive', 'timestamp'])
class RemotingMessageTestCase(unittest.TestCase):
    """
    Tests for :class:`messages.RemotingMessage`
    """

    def test_init(self):
        """A freshly created remoting message has every attribute unset."""
        a = messages.RemotingMessage()

        self.assertEqual(a.body, None)
        self.assertEqual(a.timestamp, None)
        self.assertEqual(a.destination, None)
        self.assertEqual(a.clientId, None)
        self.assertEqual(a.headers, {})
        self.assertEqual(a.timeToLive, None)
        self.assertEqual(a.messageId, None)
        self.assertEqual(a.operation, None)
        self.assertEqual(a.source, None)

    def test_kwargs(self):
        """`operation`/`source` are applied; unknown keyword args are dropped."""
        a = messages.RemotingMessage(operation='foo', source='bar', foo='bar')

        self.assertEqual(a.operation, 'foo')
        self.assertEqual(a.source, 'bar')
        self.assertFalse(hasattr(a, 'foo'))

    def test_alias(self):
        """The pyamf class alias is sealed, static and non-external."""
        alias = pyamf.get_class_alias(messages.RemotingMessage)
        alias.compile()

        self.assertTrue(alias.sealed)
        self.assertFalse(alias.dynamic)
        self.assertFalse(alias.external)
        self.assertEqual(alias.static_attrs, ['body', 'clientId',
            'destination', 'headers', 'messageId', 'operation', 'source',
            'timeToLive', 'timestamp'])
class EncodingTestCase(unittest.TestCase):
    """
    Encoding tests for :mod:`messages`

    The expected values are the exact AMF3 byte streams produced by pyamf
    for each message type's sealed static attributes.
    """

    def test_AsyncMessage(self):
        m = messages.AsyncMessage()
        m.correlationId = '1234'

        self.assertEqual(pyamf.encode(m, encoding=pyamf.AMF3).getvalue(),
            '\n\x81\x03Iflex.messaging.messages.AsyncMessage\tbody'
            '\x11clientId\x1bcorrelationId\x17destination\x0fheaders\x13'
            'messageId\x15timeToLive\x13timestamp\x01\x01\x06\t1234\x01\n\x0b'
            '\x01\x01\x01\x01\x01')

    def test_AcknowledgeMessage(self):
        m = messages.AcknowledgeMessage()
        m.correlationId = '1234'

        self.assertEqual(pyamf.encode(m, encoding=pyamf.AMF3).getvalue(),
            '\n\x81\x03Uflex.messaging.messages.AcknowledgeMessage\tbody'
            '\x11clientId\x1bcorrelationId\x17destination\x0fheaders\x13'
            'messageId\x15timeToLive\x13timestamp\x01\x01\x06\t1234\x01\n\x0b'
            '\x01\x01\x01\x01\x01')

    def test_CommandMessage(self):
        m = messages.CommandMessage(operation='foo.bar')

        self.assertEqual(pyamf.encode(m, encoding=pyamf.AMF3).getvalue(),
            '\n\x81\x13Mflex.messaging.messages.CommandMessage\tbody\x11'
            'clientId\x1bcorrelationId\x17destination\x0fheaders\x13messageId'
            '\x13operation\x15timeToLive\x13timestamp\x01\x01\x01\x01\n\x0b'
            '\x01\x01\x01\x06\x0ffoo.bar\x01\x01')

    def test_ErrorMessage(self):
        m = messages.ErrorMessage(faultString='ValueError')

        self.assertEqual(pyamf.encode(m, encoding=pyamf.AMF3).getvalue(),
            '\n\x81SIflex.messaging.messages.ErrorMessage\tbody\x11'
            'clientId\x1bcorrelationId\x17destination\x19extendedData\x13'
            'faultCode\x17faultDetail\x17faultString\x0fheaders\x13messageId'
            '\x13rootCause\x15timeToLive\x13timestamp\x01\x01\x01\x01\n\x0b'
            '\x01\x01\x01\x01\x06\x15ValueError\n\x05\x01\x01\n\x05\x01\x01'
            '\x01')

    def test_RemotingMessage(self):
        m = messages.RemotingMessage(source='foo.bar')

        # No explicit encoding: exercises the default encoder as well.
        self.assertEqual(pyamf.encode(m).getvalue(),
            '\n\x81\x13Oflex.messaging.messages.RemotingMessage'
            '\tbody\x11clientId\x17destination\x0fheaders\x13messageId\x13'
            'operation\rsource\x15timeToLive\x13timestamp\x01\x01\x01\n\x0b'
            '\x01\x01\x01\x01\x06\x0ffoo.bar\x01\x01')
import logging
import urllib
import re
import metadata_parser
import json
import pika
import gevent
import pickle
from django.template.loader import render_to_string
from socketio.namespace import BaseNamespace
from socketio.mixins import RoomsMixin, BroadcastMixin
from socketio.sdjango import namespace
from chat.models import Channel, Message
from chat.forms import MessageForm
class BroadcastEventOnlyMyMixin(BroadcastMixin):
    """Broadcast helpers targeting the current socket and/or a whole room."""

    def broadcast_event_only_me(self, event, *args):
        """Send ``event`` with ``args`` to the current client's socket only.

        The previous implementation scanned every connected socket just to
        find ``self.socket``; sending directly avoids that O(n) scan.
        """
        pkt = dict(type="event",
                   name=event,
                   args=args,
                   endpoint=self.ns_name)
        self.socket.send_packet(pkt)

    def emit_to_room_with_me(self, room, event, *args):
        """Send ``event`` to every socket in ``room``, *including* this one.

        This is sent to all in the room (in this particular Namespace);
        unlike ``RoomsMixin.emit_to_room`` the sender is not excluded.
        """
        pkt = dict(type="event",
                   name=event,
                   args=args,
                   endpoint=self.ns_name)
        room_name = self._get_room_name(room)
        for sessid, socket in self.socket.server.sockets.iteritems():
            # Sockets that never joined any room have no 'rooms' session key.
            if 'rooms' not in socket.session:
                continue
            if room_name in socket.session['rooms']:
                socket.send_packet(pkt)
@namespace('/msg')
class MsgNamespace(BaseNamespace, RoomsMixin, BroadcastEventOnlyMyMixin):
    """Socket.io namespace backing the chat message stream."""

    def initialize(self):
        """Set up per-session logging when the socket connects."""
        self.logger = logging.getLogger("socketio.msg")
        self.log("Socketio session started")

    def log(self, message):
        """Log ``message`` tagged with this socket's session id."""
        self.logger.info("[{0}] {1}".format(self.socket.sessid, message))

    def on_join(self, room, channel_id):
        """Join ``room`` and replay the channel's message history to this client."""
        self.room = room
        self.join(room)
        response = {}
        messages = Message.objects.filter(channel=channel_id)
        result = render_to_string('chat/msg_list.html', {'messages': messages})
        response['action'] = 'connect'
        response['thread_id'] = channel_id
        response['result'] = result
        self.broadcast_event_only_me('message', response)
        return True

    def on_message(self, message):
        """Persist an incoming chat message and broadcast it to its room.

        ``message`` is a dict with 'message', 'sender' and 'channel' keys.
        When the text contains an http:// URL, Open Graph metadata is
        fetched (best-effort) to render a link preview instead.
        """
        self.log('User message: {0}'.format(message))
        # Capture these before ``message`` is cleared and reused as the
        # outgoing payload below. The original code referenced
        # ``object.channel_id`` after the else-branch, which raised
        # NameError whenever form validation failed.
        channel_id = message['channel']
        sender_id = message['sender']
        data = {}
        data['message'] = urllib.unquote(message['message'].encode('utf-8')).decode("utf-8")
        form = MessageForm(data)
        if form.is_valid():
            # 'obj' instead of 'object' to avoid shadowing the builtin.
            obj = form.save(commit=False)
            obj.sender_id = sender_id
            obj.channel_id = channel_id
            obj.save()
            message.clear()
            message['room'] = self._get_room_name(channel_id)
            message['action'] = 'new_message'
            message['result'] = render_to_string('chat/msg_detail.html',
                                                 {'msg': obj})
            # parse url
            match = re.search(r'http://[a-zA-Z0-9]+\.[-a-zA-Z0-9_]+/*', obj.message)
            if match:
                try:
                    url = metadata_parser.MetadataParser(url=obj.message)
                    meta = url.metadata.get('meta')
                    page = url.metadata.get('page')
                    img = meta.get('og:image', None)
                    title = urllib.unquote(page['title'].encode('utf-8')).decode("utf-8")
                    message['result'] = render_to_string('chat/url_parse.html',
                                                         {'img': img,
                                                          'title': title,
                                                          'obj': obj})
                except Exception:
                    # Metadata fetching is best-effort: network or parse
                    # failures fall back to the plain message rendering.
                    pass
        else:
            message.clear()
            message['action'] = 'error'
        self.emit_to_room_with_me(channel_id, 'message', message)
        return True
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Ansible module to manage A10 Networks slb service-group objects
(c) 2014, Mischa Peters <mpeters@a10networks.com>
This file is part of Ansible
Ansible is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Ansible is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Ansible. If not, see <http://www.gnu.org/licenses/>.
"""
DOCUMENTATION = '''
---
module: a10_service_group
version_added: 1.8
short_description: Manage A10 Networks devices' service groups
description:
- Manage slb service-group objects on A10 Networks devices via aXAPI
author: "Mischa Peters (@mischapeters)"
notes:
- Requires A10 Networks aXAPI 2.1
- When a server doesn't exist and is added to the service-group the server will be created
options:
host:
description:
- hostname or ip of your A10 Networks device
required: true
default: null
aliases: []
choices: []
username:
description:
- admin account of your A10 Networks device
required: true
default: null
aliases: ['user', 'admin']
choices: []
password:
description:
- admin password of your A10 Networks device
required: true
default: null
aliases: ['pass', 'pwd']
choices: []
service_group:
description:
- slb service-group name
required: true
default: null
aliases: ['service', 'pool', 'group']
choices: []
service_group_protocol:
description:
- slb service-group protocol
required: false
default: tcp
aliases: ['proto', 'protocol']
choices: ['tcp', 'udp']
service_group_method:
description:
- slb service-group loadbalancing method
required: false
default: round-robin
aliases: ['method']
choices: ['round-robin', 'weighted-rr', 'least-connection', 'weighted-least-connection', 'service-least-connection', 'service-weighted-least-connection', 'fastest-response', 'least-request', 'round-robin-strict', 'src-ip-only-hash', 'src-ip-hash']
servers:
description:
- A list of servers to add to the service group. Each list item should be a
dictionary which specifies the C(server:) and C(port:), but can also optionally
specify the C(status:). See the examples below for details.
required: false
default: null
aliases: []
choices: []
write_config:
description:
- If C(yes), any changes will cause a write of the running configuration
to non-volatile memory. This will save I(all) configuration changes,
including those that may have been made manually or through other modules,
so care should be taken when specifying C(yes).
required: false
default: "no"
choices: ["yes", "no"]
validate_certs:
description:
- If C(no), SSL certificates will not be validated. This should only be used
on personally controlled devices using self-signed certificates.
required: false
default: 'yes'
choices: ['yes', 'no']
'''
EXAMPLES = '''
# Create a new service-group
- a10_service_group:
host: a10.mydomain.com
username: myadmin
password: mypassword
service_group: sg-80-tcp
servers:
- server: foo1.mydomain.com
port: 8080
- server: foo2.mydomain.com
port: 8080
- server: foo3.mydomain.com
port: 8080
- server: foo4.mydomain.com
port: 8080
status: disabled
'''
# Fields accepted by the aXAPI for a service-group definition and for each
# member server entry in the user-supplied `servers` list.
VALID_SERVICE_GROUP_FIELDS = ['name', 'protocol', 'lb_method']
VALID_SERVER_FIELDS = ['server', 'port', 'status']


def validate_servers(module, servers):
    """Validate and normalize the user-supplied server list *in place*.

    Each entry must be a dict with a 'server' name and an integer-coercible
    'port'; 'status' is converted to the aXAPI enabled/disabled integer and
    defaults to 1 (enabled). Any problem aborts via module.fail_json().
    """
    for item in servers:
        for key in item:
            if key not in VALID_SERVER_FIELDS:
                module.fail_json(msg="invalid server field (%s), must be one of: %s" % (key, ','.join(VALID_SERVER_FIELDS)))

        # validate the server name is present
        if 'server' not in item:
            module.fail_json(msg="server definitions must define the server field")

        # validate the port number is present and an integer
        if 'port' in item:
            try:
                item['port'] = int(item['port'])
            except (TypeError, ValueError):
                # was a bare `except:`, which also swallowed KeyboardInterrupt
                module.fail_json(msg="server port definitions must be integers")
        else:
            module.fail_json(msg="server definitions must define the port field")

        # convert the status to the internal API integer value
        if 'status' in item:
            item['status'] = axapi_enabled_disabled(item['status'])
        else:
            item['status'] = 1
def main():
    """Entry point: create, update or delete an A10 slb service-group.

    Authenticates against the device's aXAPI v2.1, reconciles the requested
    service-group and its member servers with the device state, optionally
    persists the running config, and exits via module.exit_json().
    """
    argument_spec = a10_argument_spec()
    argument_spec.update(url_argument_spec())
    argument_spec.update(
        dict(
            state=dict(type='str', default='present', choices=['present', 'absent']),
            service_group=dict(type='str', aliases=['service', 'pool', 'group'], required=True),
            service_group_protocol=dict(type='str', default='tcp', aliases=['proto', 'protocol'], choices=['tcp', 'udp']),
            service_group_method=dict(type='str', default='round-robin',
                                      aliases=['method'],
                                      choices=['round-robin',
                                               'weighted-rr',
                                               'least-connection',
                                               'weighted-least-connection',
                                               'service-least-connection',
                                               'service-weighted-least-connection',
                                               'fastest-response',
                                               'least-request',
                                               'round-robin-strict',
                                               'src-ip-only-hash',
                                               'src-ip-hash']),
            servers=dict(type='list', aliases=['server', 'member'], default=[]),
        )
    )

    module = AnsibleModule(
        argument_spec=argument_spec,
        supports_check_mode=False
    )

    host = module.params['host']
    username = module.params['username']
    password = module.params['password']
    state = module.params['state']
    write_config = module.params['write_config']
    slb_service_group = module.params['service_group']
    slb_service_group_proto = module.params['service_group_protocol']
    slb_service_group_method = module.params['service_group_method']
    slb_servers = module.params['servers']

    if slb_service_group is None:
        module.fail_json(msg='service_group is required')

    axapi_base_url = 'https://' + host + '/services/rest/V2.1/?format=json'
    # Mapping of user-facing method names to the aXAPI lb_method enum values.
    load_balancing_methods = {'round-robin': 0,
                              'weighted-rr': 1,
                              'least-connection': 2,
                              'weighted-least-connection': 3,
                              'service-least-connection': 4,
                              'service-weighted-least-connection': 5,
                              'fastest-response': 6,
                              'least-request': 7,
                              'round-robin-strict': 8,
                              'src-ip-only-hash': 14,
                              'src-ip-hash': 15}

    # aXAPI protocol enum: 2 == TCP (the default), 3 == UDP.
    if not slb_service_group_proto or slb_service_group_proto.lower() == 'tcp':
        protocol = 2
    else:
        protocol = 3

    # validate the server data list structure
    validate_servers(module, slb_servers)

    json_post = {
        'service_group': {
            'name': slb_service_group,
            'protocol': protocol,
            'lb_method': load_balancing_methods[slb_service_group_method],
        }
    }

    # first we authenticate to get a session id
    session_url = axapi_authenticate(module, axapi_base_url, username, password)

    # then we check to see if the specified group exists
    slb_result = axapi_call(module, session_url + '&method=slb.service_group.search', json.dumps({'name': slb_service_group}))
    slb_service_group_exist = not axapi_failure(slb_result)

    changed = False
    if state == 'present':
        # before creating/updating we need to validate that servers
        # defined in the servers list exist to prevent errors
        # NOTE(review): checked_servers is populated but never read afterwards.
        checked_servers = []
        for server in slb_servers:
            result = axapi_call(module, session_url + '&method=slb.server.search', json.dumps({'name': server['server']}))
            if axapi_failure(result):
                module.fail_json(msg="the server %s specified in the servers list does not exist" % server['server'])
            checked_servers.append(server['server'])

        if not slb_service_group_exist:
            result = axapi_call(module, session_url + '&method=slb.service_group.create', json.dumps(json_post))
            if axapi_failure(result):
                module.fail_json(msg=result['response']['err']['msg'])
            changed = True
        else:
            # check to see if the service group definition without the
            # server members is different, and update that individually
            # if it needs it
            do_update = False
            for field in VALID_SERVICE_GROUP_FIELDS:
                if json_post['service_group'][field] != slb_result['service_group'][field]:
                    do_update = True
                    break

            if do_update:
                result = axapi_call(module, session_url + '&method=slb.service_group.update', json.dumps(json_post))
                if axapi_failure(result):
                    module.fail_json(msg=result['response']['err']['msg'])
                changed = True

        # next we pull the defined list of servers out of the returned
        # results to make it a bit easier to iterate over
        defined_servers = slb_result.get('service_group', {}).get('member_list', [])

        # next we add/update new member servers from the user-specified
        # list if they're different or not on the target device
        for server in slb_servers:
            found = False
            different = False
            for def_server in defined_servers:
                if server['server'] == def_server['server']:
                    found = True
                    for valid_field in VALID_SERVER_FIELDS:
                        if server[valid_field] != def_server[valid_field]:
                            different = True
                            break
                if found or different:
                    break

            # add or update as required
            server_data = {
                "name": slb_service_group,
                "member": server,
            }
            if not found:
                result = axapi_call(module, session_url + '&method=slb.service_group.member.create', json.dumps(server_data))
                changed = True
            elif different:
                result = axapi_call(module, session_url + '&method=slb.service_group.member.update', json.dumps(server_data))
                changed = True

        # finally, remove any servers that are on the target
        # device but were not specified in the list given
        for server in defined_servers:
            found = False
            for slb_server in slb_servers:
                if server['server'] == slb_server['server']:
                    found = True
                    break
            # remove if not found
            server_data = {
                "name": slb_service_group,
                "member": server,
            }
            if not found:
                result = axapi_call(module, session_url + '&method=slb.service_group.member.delete', json.dumps(server_data))
                changed = True

        # if we changed things, get the full info regarding
        # the service group for the return data below
        if changed:
            result = axapi_call(module, session_url + '&method=slb.service_group.search', json.dumps({'name': slb_service_group}))
        else:
            result = slb_result
    elif state == 'absent':
        if slb_service_group_exist:
            result = axapi_call(module, session_url + '&method=slb.service_group.delete', json.dumps({'name': slb_service_group}))
            changed = True
        else:
            result = dict(msg="the service group was not present")

    # if the config has changed, save the config unless otherwise requested
    if changed and write_config:
        write_result = axapi_call(module, session_url + '&method=system.action.write_memory')
        if axapi_failure(write_result):
            module.fail_json(msg="failed to save the configuration: %s" % write_result['response']['err']['msg'])

    # log out of the session nicely and exit
    axapi_call(module, session_url + '&method=session.close')
    module.exit_json(changed=changed, content=result)
# standard ansible module imports
from ansible.module_utils.basic import *
from ansible.module_utils.urls import *
from ansible.module_utils.a10 import *
main() | unknown | codeparrot/codeparrot-clean | ||
////////////////////////////////////////////////////////////////////////////
//
// Copyright 2021 Realm Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
////////////////////////////////////////////////////////////////////////////
#ifndef SCHEMA_VERSION_1
#define SCHEMA_VERSION_1 0
#endif
#if SCHEMA_VERSION_1
#import <Foundation/Foundation.h>
#import <Realm/Realm.h>
#pragma mark - Schema
NSInteger schemaVersion = 1;
// Changes from previous version:
// - combine `firstName` and `lastName` into `fullName`
// Model object for schema version 1: the previous `firstName`/`lastName`
// fields have been merged into a single `fullName` property.
@interface Person : RLMObject
@property NSString *fullName;
@property NSInteger age;
// Convenience factory setting both properties.
+ (Person *)personWithFullName:(NSString *)fullName age:(int)age;
@end

@implementation Person
+ (Person *)personWithFullName:(NSString *)fullName age:(int)age {
    Person *person = [[self alloc] init];
    person.fullName = fullName;
    person.age = age;
    return person;
}
// Realm override: marks these properties as required (non-nil).
+ (NSArray *)requiredProperties {
    return @[@"fullName", @"age"];
}
@end
#pragma mark - Migration
// Migration block to migrate from *any* previous version to this version.
RLMMigrationBlock migrationBlock = ^(RLMMigration *migration, uint64_t oldSchemaVersion) {
    if (oldSchemaVersion < 1) {
        [migration enumerateObjects:Person.className block:^(RLMObject *oldObject, RLMObject *newObject) {
            // Combine the previous `firstName` and `lastName` fields into `fullName`.
            // (The duplicate `oldSchemaVersion < 1` check the inner block used to
            // repeat was redundant — the outer condition already guarantees it.)
            newObject[@"fullName"] = [NSString stringWithFormat:@"%@ %@", oldObject[@"firstName"], oldObject[@"lastName"]];
        }];
    }
};
// This block checks if the migration led to the expected result.
// All older versions should have been migrated to the below stated `exampleData`.
// NOTE(review): plain C assert() is compiled out when NDEBUG is defined, so
// these checks only run in debug builds.
typedef void (^MigrationCheck) (RLMRealm *realm);
MigrationCheck migrationCheck = ^(RLMRealm *realm) {
    RLMResults<Person *> *persons = [Person allObjects];
    assert(persons.count == 3);
    // NOTE(review): the index-based checks assume results come back in
    // insertion order; confirm, or sort the results explicitly.
    assert([persons[0].fullName isEqualToString:@"John Doe"]);
    assert(persons[0].age == 42);
    assert([persons[1].fullName isEqualToString:@"Jane Doe"]);
    assert(persons[1].age == 43);
    assert([persons[2].fullName isEqualToString:@"John Smith"]);
    assert(persons[2].age == 44);
};
#pragma mark - Example data
// Example data for this schema version.
typedef void (^ExampleData) (RLMRealm *realm);
ExampleData exampleData = ^(RLMRealm *realm) {
Person *person1 = [Person personWithFullName:@"John Doe" age: 42];
Person *person2 = [Person personWithFullName:@"Jane Doe" age: 43];
Person *person3 = [Person personWithFullName:@"John Smith" age: 44];
[realm addObjects:@[person1, person2, person3]];
};
#endif | c | github | https://github.com/realm/realm-swift | examples/ios/objc/Migration/Examples/Example_v1.h |
# Copyright (c) 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''
Created on February 27, 2012

A filter that disallows any paths that contain defined forbidden characters or
that exceed a defined length.

Place early in the proxy-server pipeline after the left-most occurrence of the
``proxy-logging`` middleware (if present) and before the final
``proxy-logging`` middleware (if present) or the ``proxy-server`` app itself,
e.g.::

    [pipeline:main]
    pipeline = catch_errors healthcheck proxy-logging name_check cache \
ratelimit tempauth sos proxy-logging proxy-server

    [filter:name_check]
    use = egg:swift#name_check
    forbidden_chars = '"`<>
    maximum_length = 255

There are default settings for forbidden_chars (FORBIDDEN_CHARS) and
maximum_length (MAX_LENGTH)

The filter returns HTTPBadRequest if path is invalid.

@author: eamonn-otoole
'''
import re
from swift.common.utils import get_logger
from urllib2 import unquote
from swift.common.swob import Request, HTTPBadRequest
# Default set of characters rejected anywhere in a request path.
FORBIDDEN_CHARS = "\'\"`<>"
# Default maximum (unquoted) path length.
MAX_LENGTH = 255
# Default pattern rejecting '.' and '..' path segments (traversal-style names).
FORBIDDEN_REGEXP = "/\./|/\.\./|/\.$|/\.\.$"
class NameCheckMiddleware(object):
    """WSGI middleware that rejects requests whose path contains forbidden
    characters, matches a forbidden regexp, or exceeds a maximum length.

    Returns HTTPBadRequest for invalid paths; otherwise passes the request
    to the next component in the pipeline.
    """

    def __init__(self, app, conf):
        self.app = app
        self.conf = conf
        self.forbidden_chars = self.conf.get('forbidden_chars',
                                             FORBIDDEN_CHARS)
        self.maximum_length = int(self.conf.get('maximum_length', MAX_LENGTH))
        self.forbidden_regexp = self.conf.get('forbidden_regexp',
                                              FORBIDDEN_REGEXP)
        # Compile once at startup; an empty/None pattern disables the check.
        if self.forbidden_regexp:
            self.forbidden_regexp_compiled = re.compile(self.forbidden_regexp)
        else:
            self.forbidden_regexp_compiled = None
        self.logger = get_logger(self.conf, log_route='name_check')

    def check_character(self, req):
        '''
        Returns True if req.path contains any forbidden character,
        False otherwise.
        '''
        # Lazy %-style logger args avoid string formatting when DEBUG is off.
        self.logger.debug("name_check: path %s", req.path)
        self.logger.debug("name_check: self.forbidden_chars %s",
                          self.forbidden_chars)
        path = unquote(req.path)
        return any(c in self.forbidden_chars for c in path)

    def check_length(self, req):
        '''
        Returns True if the length of req.path exceeds the configured
        maximum, False otherwise.
        '''
        return len(unquote(req.path)) > self.maximum_length

    def check_regexp(self, req):
        '''
        Returns True if req.path contains a substring matching the
        forbidden regexp, False otherwise.
        '''
        if self.forbidden_regexp_compiled is None:
            return False
        self.logger.debug("name_check: path %s", req.path)
        self.logger.debug("name_check: self.forbidden_regexp %s",
                          self.forbidden_regexp)
        unquoted_path = unquote(req.path)
        return self.forbidden_regexp_compiled.search(unquoted_path) is not None

    def __call__(self, env, start_response):
        req = Request(env)

        if self.check_character(req):
            return HTTPBadRequest(
                request=req,
                body=("Object/Container/Account name contains forbidden "
                      "chars from %s"
                      % self.forbidden_chars))(env, start_response)
        elif self.check_length(req):
            return HTTPBadRequest(
                request=req,
                body=("Object/Container/Account name longer than the "
                      "allowed maximum "
                      "%s" % self.maximum_length))(env, start_response)
        elif self.check_regexp(req):
            return HTTPBadRequest(
                request=req,
                body=("Object/Container/Account name contains a forbidden "
                      "substring from regular expression %s"
                      % self.forbidden_regexp))(env, start_response)
        else:
            # Pass on to downstream WSGI component
            return self.app(env, start_response)
def filter_factory(global_conf, **local_conf):
    """Standard paste.deploy filter factory for NameCheckMiddleware.

    Merges the per-filter options over the global config and returns a
    callable that wraps a WSGI app with the name-check middleware.
    """
    merged_conf = dict(global_conf)
    merged_conf.update(local_conf)

    def name_check_filter(app):
        return NameCheckMiddleware(app, merged_conf)
    return name_check_filter
/*
* Copyright 2010-2024 JetBrains s.r.o. and Kotlin Programming Language contributors.
* Use of this source code is governed by the Apache 2.0 license that can be found in the license/LICENSE.txt file.
*/
package org.jetbrains.kotlin.analysis.api.fir.symbols
import com.intellij.psi.PsiElement
import org.jetbrains.kotlin.analysis.api.annotations.KaAnnotationList
import org.jetbrains.kotlin.analysis.api.fir.KaFirSession
import org.jetbrains.kotlin.analysis.api.fir.findPsi
import org.jetbrains.kotlin.analysis.api.fir.location
import org.jetbrains.kotlin.analysis.api.lifetime.withValidityAssertion
import org.jetbrains.kotlin.analysis.api.symbols.KaSymbolLocation
import org.jetbrains.kotlin.fir.symbols.impl.FirClassSymbol
import org.jetbrains.kotlin.fir.symbols.impl.FirTypeParameterSymbol
import org.jetbrains.kotlin.lexer.KtTokens
import org.jetbrains.kotlin.name.Name
import org.jetbrains.kotlin.psi.KtTypeParameter
import org.jetbrains.kotlin.types.Variance
internal class KaFirTypeParameterSymbol private constructor(
override val backingPsi: KtTypeParameter?,
override val analysisSession: KaFirSession,
override val lazyFirSymbol: Lazy<FirTypeParameterSymbol>,
) : KaFirTypeParameterSymbolBase<KtTypeParameter>(), KaFirKtBasedSymbol<KtTypeParameter, FirTypeParameterSymbol> {
constructor(declaration: KtTypeParameter, session: KaFirSession) : this(
backingPsi = declaration,
lazyFirSymbol = lazyFirSymbol(declaration, session),
analysisSession = session,
)
constructor(symbol: FirTypeParameterSymbol, session: KaFirSession) : this(
backingPsi = symbol.backingPsiIfApplicable as? KtTypeParameter,
lazyFirSymbol = lazyOf(symbol),
analysisSession = session,
)
override val psi: PsiElement?
get() = withValidityAssertion { backingPsi ?: findPsi() }
override val name: Name
get() = withValidityAssertion { backingPsi?.nameAsSafeName ?: firSymbol.name }
override val annotations: KaAnnotationList
get() = withValidityAssertion { psiOrSymbolAnnotationList() }
override val variance: Variance
get() = withValidityAssertion { backingPsi?.variance ?: firSymbol.variance }
override val isReified: Boolean
get() = withValidityAssertion { backingPsi?.hasModifier(KtTokens.REIFIED_KEYWORD) ?: firSymbol.isReified }
override val location: KaSymbolLocation
get() = withValidityAssertion {
when {
backingPsi != null -> backingPsi.location
firSymbol.containingDeclarationSymbol is FirClassSymbol<*> -> KaSymbolLocation.CLASS
else -> KaSymbolLocation.LOCAL
}
}
} | kotlin | github | https://github.com/JetBrains/kotlin | analysis/analysis-api-fir/src/org/jetbrains/kotlin/analysis/api/fir/symbols/KaFirTypeParameterSymbol.kt |
#!/usr/bin/python
# coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import os
import re
import sys
if len(sys.argv) != 3:
print("Usage: publish_migration.py <publish.properties> <directory to write new files>")
exit(1)
filename = sys.argv[1]
new_base_dir = sys.argv[2]
def extract_artifact(line):
splitline = line.split('%')
org = re.sub(r'^revision\.[a-z_]+\.', '', splitline[0])
name = re.sub(r'=.*', '', splitline[1].rstrip())
return (org, name)
with open(filename) as f:
content = f.readlines()
for line in content:
# For each line get the org and name, make a directory with these
# and open the publish file.
artifact = extract_artifact(line)
(org, name) = artifact
publish_dir = os.path.join(new_base_dir, org, name)
if not os.path.exists(publish_dir):
os.makedirs(publish_dir)
with open(os.path.join(publish_dir, 'publish.properties'), 'a') as output:
output.write(line) | unknown | codeparrot/codeparrot-clean | ||
import pytest
import aos_version
from collections import namedtuple
Package = namedtuple('Package', ['name', 'version'])
expected_pkgs = {
"spam": {
"name": "spam",
"version": "3.2.1",
"check_multi": False,
},
"eggs": {
"name": "eggs",
"version": "3.2.1",
"check_multi": False,
},
}
@pytest.mark.parametrize('pkgs,expected_pkgs_dict', [
(
# all found
[Package('spam', '3.2.1'), Package('eggs', '3.2.1')],
expected_pkgs,
),
(
# found with more specific version
[Package('spam', '3.2.1'), Package('eggs', '3.2.1.5')],
expected_pkgs,
),
(
[Package('ovs', '2.6'), Package('ovs', '2.4')],
{
"ovs": {
"name": "ovs",
"version": ["2.6", "2.7"],
"check_multi": False,
}
},
),
(
[Package('ovs', '2.7')],
{
"ovs": {
"name": "ovs",
"version": ["2.6", "2.7"],
"check_multi": False,
}
},
),
])
def test_check_precise_version_found(pkgs, expected_pkgs_dict):
aos_version._check_precise_version_found(pkgs, expected_pkgs_dict)
@pytest.mark.parametrize('pkgs,expect_not_found', [
(
[],
{
"spam": {
"name": "spam",
"version": "3.2.1",
"check_multi": False,
},
"eggs": {
"name": "eggs",
"version": "3.2.1",
"check_multi": False,
}
}, # none found
),
(
[Package('spam', '3.2.1')],
{
"eggs": {
"name": "eggs",
"version": "3.2.1",
"check_multi": False,
}
}, # completely missing
),
(
[Package('spam', '3.2.1'), Package('eggs', '3.3.2')],
{
"eggs": {
"name": "eggs",
"version": "3.2.1",
"check_multi": False,
}
}, # not the right version
),
(
[Package('eggs', '1.2.3'), Package('eggs', '3.2.1.5')],
{
"spam": {
"name": "spam",
"version": "3.2.1",
"check_multi": False,
}
}, # eggs found with multiple versions
),
])
def test_check_precise_version_found_fail(pkgs, expect_not_found):
with pytest.raises(aos_version.PreciseVersionNotFound) as e:
aos_version._check_precise_version_found(pkgs, expected_pkgs)
assert list(expect_not_found.values()) == e.value.problem_pkgs
@pytest.mark.parametrize('pkgs,expected_pkgs_dict', [
(
[],
expected_pkgs,
),
(
# more precise but not strictly higher
[Package('spam', '3.2.1.9')],
expected_pkgs,
),
(
[Package('ovs', '2.7')],
{
"ovs": {
"name": "ovs",
"version": ["2.6", "2.7"],
"check_multi": False,
}
},
),
])
def test_check_higher_version_found(pkgs, expected_pkgs_dict):
aos_version._check_higher_version_found(pkgs, expected_pkgs_dict)
@pytest.mark.parametrize('pkgs,expected_pkgs_dict,expect_higher', [
(
[Package('spam', '3.3')],
expected_pkgs,
['spam-3.3'], # lower precision, but higher
),
(
[Package('spam', '3.2.1'), Package('eggs', '3.3.2')],
expected_pkgs,
['eggs-3.3.2'], # one too high
),
(
[Package('eggs', '1.2.3'), Package('eggs', '3.2.1.5'), Package('eggs', '3.4')],
expected_pkgs,
['eggs-3.4'], # multiple versions, one is higher
),
(
[Package('eggs', '3.2.1'), Package('eggs', '3.4'), Package('eggs', '3.3')],
expected_pkgs,
['eggs-3.4'], # multiple versions, two are higher
),
(
[Package('ovs', '2.8')],
{
"ovs": {
"name": "ovs",
"version": ["2.6", "2.7"],
"check_multi": False,
}
},
['ovs-2.8'],
),
])
def test_check_higher_version_found_fail(pkgs, expected_pkgs_dict, expect_higher):
with pytest.raises(aos_version.FoundHigherVersion) as e:
aos_version._check_higher_version_found(pkgs, expected_pkgs_dict)
assert set(expect_higher) == set(e.value.problem_pkgs)
@pytest.mark.parametrize('pkgs', [
[],
[Package('spam', '3.2.1')],
[Package('spam', '3.2.1'), Package('eggs', '3.2.2')],
])
def test_check_multi_minor_release(pkgs):
aos_version._check_multi_minor_release(pkgs, expected_pkgs)
@pytest.mark.parametrize('pkgs,expect_to_flag_pkgs', [
(
[Package('spam', '3.2.1'), Package('spam', '3.3.2')],
['spam'],
),
(
[Package('eggs', '1.2.3'), Package('eggs', '3.2.1.5'), Package('eggs', '3.4')],
['eggs'],
),
])
def test_check_multi_minor_release_fail(pkgs, expect_to_flag_pkgs):
with pytest.raises(aos_version.FoundMultiRelease) as e:
aos_version._check_multi_minor_release(pkgs, expected_pkgs)
assert set(expect_to_flag_pkgs) == set(e.value.problem_pkgs) | unknown | codeparrot/codeparrot-clean | ||
// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html.
// Copyright (C) 2025, Institute of Software, Chinese Academy of Sciences.
#include "rvv_hal.hpp"
namespace cv { namespace rvv_hal { namespace imgproc {
#if CV_HAL_RVV_1P0_ENABLED
namespace {
class MomentsInvoker : public ParallelLoopBody
{
public:
template<typename... Args>
MomentsInvoker(std::function<int(int, int, Args...)> _func, Args&&... args)
{
func = std::bind(_func, std::placeholders::_1, std::placeholders::_2, std::forward<Args>(args)...);
}
virtual void operator()(const Range& range) const override
{
func(range.start, range.end);
}
private:
std::function<int(int, int)> func;
};
template<typename... Args>
static inline int invoke(int width, int height, std::function<int(int, int, Args...)> func, Args&&... args)
{
cv::parallel_for_(Range(1, height), MomentsInvoker(func, std::forward<Args>(args)...), static_cast<double>((width - 1) * height) / (1 << 10));
return func(0, 1, std::forward<Args>(args)...);
}
template<typename helper> struct rvv;
template<> struct rvv<RVV_U32M2>
{
static inline vuint8mf2_t vid(size_t a) { return __riscv_vid_v_u8mf2(a); }
static inline RVV_U32M2::VecType vcvt(vuint8mf2_t a, size_t b) { return __riscv_vzext_vf4(a, b); }
};
template<> struct rvv<RVV_U32M4>
{
static inline vuint8m1_t vid(size_t a) { return __riscv_vid_v_u8m1(a); }
static inline RVV_U32M4::VecType vcvt(vuint8m1_t a, size_t b) { return __riscv_vzext_vf4(a, b); }
};
template<> struct rvv<RVV_I32M2>
{
static inline vuint8mf2_t vid(size_t a) { return __riscv_vid_v_u8mf2(a); }
static inline RVV_I32M2::VecType vcvt(vuint8mf2_t a, size_t b) { return RVV_I32M2::reinterpret(__riscv_vzext_vf4(a, b)); }
};
template<> struct rvv<RVV_F64M4>
{
static inline vuint8mf2_t vid(size_t a) { return __riscv_vid_v_u8mf2(a); }
static inline RVV_F64M4::VecType vcvt(vuint8mf2_t a, size_t b) { return __riscv_vfcvt_f(__riscv_vzext_vf8(a, b), b); }
};
constexpr int TILE_SIZE = 32;
template<bool binary, typename T, typename helperT, typename helperWT, typename helperMT>
static inline int imageMoments(int start, int end, const uchar* src_data, size_t src_step, int full_width, int full_height, double* m, std::mutex* mt)
{
double mm[10] = {0};
for (int yy = start; yy < end; yy++)
{
const int y = yy * TILE_SIZE;
const int height = std::min(TILE_SIZE, full_height - y);
for (int x = 0; x < full_width; x += TILE_SIZE)
{
const int width = std::min(TILE_SIZE, full_width - x);
double mom[10] = {0};
for (int i = 0; i < height; i++)
{
auto id = rvv<helperWT>::vid(helperT::setvlmax());
auto v0 = helperWT::vmv(0, helperWT::setvlmax());
auto v1 = helperWT::vmv(0, helperWT::setvlmax());
auto v2 = helperWT::vmv(0, helperWT::setvlmax());
auto v3 = helperMT::vmv(0, helperMT::setvlmax());
int vl;
for (int j = 0; j < width; j += vl)
{
vl = helperT::setvl(width - j);
typename helperWT::VecType p;
if (binary)
{
auto src = RVV_SameLen<T, helperT>::vload(reinterpret_cast<const T*>(src_data + (i + y) * src_step) + j + x, vl);
p = __riscv_vmerge(helperWT::vmv(0, vl), helperWT::vmv(255, vl), RVV_SameLen<T, helperT>::vmne(src, 0, vl), vl);
}
else
{
p = helperWT::cast(helperT::vload(reinterpret_cast<const typename helperT::ElemType*>(src_data + (i + y) * src_step) + j + x, vl), vl);
}
auto xx = rvv<helperWT>::vcvt(id, vl);
auto xp = helperWT::vmul(xx, p, vl);
v0 = helperWT::vadd_tu(v0, v0, p, vl);
v1 = helperWT::vadd_tu(v1, v1, xp, vl);
auto xxp = helperWT::vmul(xx, xp, vl);
v2 = helperWT::vadd_tu(v2, v2, xxp, vl);
v3 = helperMT::vadd_tu(v3, v3, helperMT::vmul(helperMT::cast(xx, vl), helperMT::cast(xxp, vl), vl), vl);
id = __riscv_vadd(id, vl, vl);
}
auto x0 = RVV_BaseType<helperWT>::vmv_x(helperWT::vredsum(v0, RVV_BaseType<helperWT>::vmv_s(0, RVV_BaseType<helperWT>::setvlmax()), helperWT::setvlmax()));
auto x1 = RVV_BaseType<helperWT>::vmv_x(helperWT::vredsum(v1, RVV_BaseType<helperWT>::vmv_s(0, RVV_BaseType<helperWT>::setvlmax()), helperWT::setvlmax()));
auto x2 = RVV_BaseType<helperWT>::vmv_x(helperWT::vredsum(v2, RVV_BaseType<helperWT>::vmv_s(0, RVV_BaseType<helperWT>::setvlmax()), helperWT::setvlmax()));
auto x3 = RVV_BaseType<helperMT>::vmv_x(helperMT::vredsum(v3, RVV_BaseType<helperMT>::vmv_s(0, RVV_BaseType<helperMT>::setvlmax()), helperMT::setvlmax()));
typename helperWT::ElemType py = i * x0, sy = i*i;
mom[9] += static_cast<typename helperMT::ElemType>(py) * sy;
mom[8] += static_cast<typename helperMT::ElemType>(x1) * sy;
mom[7] += static_cast<typename helperMT::ElemType>(x2) * i;
mom[6] += x3;
mom[5] += x0 * sy;
mom[4] += x1 * i;
mom[3] += x2;
mom[2] += py;
mom[1] += x1;
mom[0] += x0;
}
if (binary)
{
mom[0] /= 255, mom[1] /= 255, mom[2] /= 255, mom[3] /= 255, mom[4] /= 255;
mom[5] /= 255, mom[6] /= 255, mom[7] /= 255, mom[8] /= 255, mom[9] /= 255;
}
double xm = x * mom[0], ym = y * mom[0];
mm[0] += mom[0];
mm[1] += mom[1] + xm;
mm[2] += mom[2] + ym;
mm[3] += mom[3] + x * (mom[1] * 2 + xm);
mm[4] += mom[4] + x * (mom[2] + ym) + y * mom[1];
mm[5] += mom[5] + y * (mom[2] * 2 + ym);
mm[6] += mom[6] + x * (3. * mom[3] + x * (3. * mom[1] + xm));
mm[7] += mom[7] + x * (2 * (mom[4] + y * mom[1]) + x * (mom[2] + ym)) + y * mom[3];
mm[8] += mom[8] + y * (2 * (mom[4] + x * mom[2]) + y * (mom[1] + xm)) + x * mom[5];
mm[9] += mom[9] + y * (3. * mom[5] + y * (3. * mom[2] + ym));
}
}
std::lock_guard<std::mutex> lk(*mt);
for (int i = 0; i < 10; i++)
m[i] += mm[i];
return CV_HAL_ERROR_OK;
}
} // anonymous
// the algorithm is copied from imgproc/src/moments.cpp,
// in the function cv::Moments cv::moments
int imageMoments(const uchar* src_data, size_t src_step, int src_type, int width, int height, bool binary, double m[10])
{
if (src_type != CV_16UC1 && src_type != CV_16SC1 && src_type != CV_32FC1 && src_type != CV_64FC1)
return CV_HAL_ERROR_NOT_IMPLEMENTED;
std::fill(m, m + 10, 0);
const int cnt = (height + TILE_SIZE - 1) / TILE_SIZE;
std::mutex mt;
switch (static_cast<int>(binary)*100 + src_type)
{
case CV_16UC1:
return invoke(width, cnt, {imageMoments<false, ushort, RVV_U16M1, RVV_U32M2, RVV_U64M4>}, src_data, src_step, width, height, m, &mt);
case CV_16SC1:
return invoke(width, cnt, {imageMoments<false, short, RVV_I16M1, RVV_I32M2, RVV_I64M4>}, src_data, src_step, width, height, m, &mt);
case CV_32FC1:
return invoke(width, cnt, {imageMoments<false, float, RVV_F32M2, RVV_F64M4, RVV_F64M4>}, src_data, src_step, width, height, m, &mt);
case CV_64FC1:
return invoke(width, cnt, {imageMoments<false, double, RVV_F64M4, RVV_F64M4, RVV_F64M4>}, src_data, src_step, width, height, m, &mt);
case 100 + CV_16UC1:
return invoke(width, cnt, {imageMoments<true, ushort, RVV_U8M1, RVV_U32M4, RVV_U32M4>}, src_data, src_step, width, height, m, &mt);
case 100 + CV_16SC1:
return invoke(width, cnt, {imageMoments<true, short, RVV_U8M1, RVV_U32M4, RVV_U32M4>}, src_data, src_step, width, height, m, &mt);
case 100 + CV_32FC1:
return invoke(width, cnt, {imageMoments<true, float, RVV_U8M1, RVV_U32M4, RVV_U32M4>}, src_data, src_step, width, height, m, &mt);
case 100 + CV_64FC1:
return invoke(width, cnt, {imageMoments<true, double, RVV_U8M1, RVV_U32M4, RVV_U32M4>}, src_data, src_step, width, height, m, &mt);
}
return CV_HAL_ERROR_NOT_IMPLEMENTED;
}
#endif // CV_HAL_RVV_1P0_ENABLED
}}} // cv::rvv_hal::imgproc | cpp | github | https://github.com/opencv/opencv | hal/riscv-rvv/src/imgproc/moments.cpp |
<!--
Copyright (C) Daniel Stenberg, <daniel@haxx.se>, et al.
SPDX-License-Identifier: curl
-->
# The curl HTTP Test Suite
This is an additional test suite using a combination of Apache httpd and
nghttpx servers to perform various tests beyond the capabilities of the
standard curl test suite.
# Usage
The test cases and necessary files are in `tests/http`. You can invoke
`pytest` from there or from the top level curl checkout and it finds all
tests.
```
curl> pytest test/http
platform darwin -- Python 3.9.15, pytest-6.2.0, py-1.10.0, pluggy-0.13.1
rootdir: /Users/sei/projects/curl
collected 5 items
tests/http/test_01_basic.py .....
```
Pytest takes arguments. `-v` increases its verbosity and can be used several
times. `-k <expr>` can be used to run only matching test cases. The `expr` can
be something resembling a python test or just a string that needs to match
test cases in their names.
```
curl/tests/http> pytest -vv -k test_01_02
```
runs all test cases that have `test_01_02` in their name. This does not have
to be the start of the name.
Depending on your setup, some test cases may be skipped and appear as `s` in
the output. If you run pytest verbose, it also gives you the reason for
skipping.
# Prerequisites
You need:
1. a recent Python, the `cryptography` module and, of course, `pytest`
2. an apache httpd development version. On Debian/Ubuntu, the package
`apache2-dev` has this
3. a local `curl` project build
3. optionally, a `nghttpx` with HTTP/3 enabled or h3 test cases are skipped
### Configuration
Via curl's `configure` script you may specify:
* `--with-test-nghttpx=<path-of-nghttpx>` if you have nghttpx to use
somewhere outside your `$PATH`.
* `--with-test-httpd=<httpd-install-path>` if you have an Apache httpd
installed somewhere else. On Debian/Ubuntu it otherwise looks into
`/usr/bin` and `/usr/sbin` to find those.
* `--with-test-caddy=<caddy-install-path>` if you have a Caddy web server
installed somewhere else.
* `--with-test-vsftpd=<vsftpd-install-path>` if you have a vsftpd ftp
server installed somewhere else.
* `--with-test-danted=<danted-path>` if you have `dante-server` installed
## Usage Tips
Several test cases are parameterized, for example with the HTTP version to
use. If you want to run a test with a particular protocol only, use a command
line like:
```
curl/tests/http> pytest -k "test_02_06 and h2"
```
Test cases can be repeated, with the `pytest-repeat` module (`pip install
pytest-repeat`). Like in:
```
curl/tests/http> pytest -k "test_02_06 and h2" --count=100
```
which then runs this test case a hundred times. In case of flaky tests, you
can make pytest stop on the first one with:
```
curl/tests/http> pytest -k "test_02_06 and h2" --count=100 --maxfail=1
```
which allow you to inspect output and log files for the failed run. Speaking
of log files, the verbosity of pytest is also used to collect curl trace
output. If you specify `-v` three times, the `curl` command is started with
`--trace`:
```
curl/tests/http> pytest -vvv -k "test_02_06 and h2" --count=100 --maxfail=1
```
all of curl's output and trace file are found in `tests/http/gen/curl`.
## Writing Tests
There is a lot of [`pytest` documentation](https://docs.pytest.org/) with
examples. No use in repeating that here. Assuming you are somewhat familiar
with it, it is useful how *this* general test suite is setup. Especially if
you want to add test cases.
### Servers
In `conftest.py` 3 "fixtures" are defined that are used by all test cases:
1. `env`: the test environment. It is an instance of class
`testenv/env.py:Env`. It holds all information about paths, availability of
features (HTTP/3), port numbers to use, domains and SSL certificates for
those.
2. `httpd`: the Apache httpd instance, configured and started, then stopped at
the end of the test suite. It has sites configured for the domains from
`env`. It also loads a local module `mod_curltest?` and makes it available
in certain locations. (more on mod_curltest below).
3. `nghttpx`: an instance of nghttpx that provides HTTP/3 support. `nghttpx`
proxies those requests to the `httpd` server. In a direct mapping, so you
may access all the resources under the same path as with HTTP/2. Only the
port number used for HTTP/3 requests are different.
`pytest` manages these fixture so that they are created once and terminated
before exit. This means you can `Ctrl-C` a running pytest and the server then
shutdowns. Only when you brutally chop its head off, might there be servers
left behind.
### Test Cases
Tests making use of these fixtures have them in their parameter list. This
tells pytest that a particular test needs them, so it has to create them.
Since one can invoke pytest for just a single test, it is important that a
test references the ones it needs.
All test cases start with `test_` in their name. We use a double number scheme
to group them. This makes it ease to run only specific tests and also give a
short mnemonic to communicate trouble with others in the project. Otherwise
you are free to name test cases as you think fitting.
Tests are grouped thematically in a file with a single Python test class. This
is convenient if you need a special "fixture" for several tests. "fixtures"
can have "class" scope.
There is a curl helper class that knows how to invoke curl and interpret its
output. Among other things, it does add the local CA to the command line, so
that SSL connections to the test servers are verified. Nothing prevents anyone
from running curl directly, for specific uses not covered by the `CurlClient`
class.
### mod_curltest
The module source code is found in `testenv/mod_curltest`. It is compiled
using the `apxs` command, commonly provided via the `apache2-dev` package.
Compilation is quick and done once at the start of a test run.
The module adds 2 "handlers" to the Apache server (right now). Handler are
pieces of code that receive HTTP requests and generate the response. Those
handlers are:
* `curltest-echo`: hooked up on the path `/curltest/echo`. This one echoes
a request and copies all data from the request body to the response body.
Useful for simulating upload and checking that the data arrived as intended.
* `curltest-tweak`: hooked up on the path `/curltest/tweak`. This handler is
more of a Swiss army knife. It interprets parameters from the URL query
string to drive its behavior.
* `status=nnn`: generate a response with HTTP status code `nnn`.
* `chunks=n`: generate `n` chunks of data in the response body, defaults to 3.
* `chunk_size=nnn`: each chunk should contain `nnn` bytes of data. Maximum is 16KB right now.
* `chunkd_delay=duration`: wait `duration` time between writing chunks
* `delay=duration`: wait `duration` time to send the response headers
* `body_error=(timeout|reset)`: produce an error after the first chunk in the response body
* `id=str`: add `str` in the response header `request-id`
`duration` values are integers, optionally followed by a unit. Units are:
* `d`: days (probably not useful here)
* `h`: hours
* `mi`: minutes
* `s`: seconds (the default)
* `ms`: milliseconds
As you can see, `mod_curltest`'s tweak handler allows Apache to simulate many
kinds of responses. An example of its use is `test_03_01` where responses are
delayed using `chunk_delay`. This gives the response a defined duration and the
test uses that to reload `httpd` in the middle of the first request. A graceful
reload in httpd lets ongoing requests finish, but closes the connection
afterwards and tears down the serving process. The following request then needs
to open a new connection. This is verified by the test case. | unknown | github | https://github.com/curl/curl | docs/tests/HTTP.md |
import serial
import re
from time import sleep
from datetime import datetime, timedelta
from .tty import Terminal
# -------------------------------------------------------------------------
# Terminal connection over SERIAL CONSOLE
# -------------------------------------------------------------------------
_PROMPT = re.compile('|'.join(Terminal._RE_PAT))
class Serial(Terminal):
def __init__(self, port='/dev/ttyUSB0', **kvargs):
"""
:port:
the serial port, defaults to USB0 since this
:kvargs['timeout']:
this is the tty read polling timeout.
generally you should not have to tweak this.
"""
# initialize the underlying TTY device
self.port = port
self._ser = serial.Serial()
self._ser.port = port
self._ser.timeout = kvargs.get('timeout', self.TIMEOUT)
self._tty_name = self.port
Terminal.__init__(self, **kvargs)
# -------------------------------------------------------------------------
# I/O open close called from Terminal class
# -------------------------------------------------------------------------
def _tty_open(self):
try:
self._ser.open()
except OSError as err:
raise RuntimeError("open_failed:{0}".format(err.strerror))
self.write('\n\n\n') # hit <ENTER> a few times, yo!
def _tty_close(self):
self._ser.flush()
self._ser.close()
# -------------------------------------------------------------------------
# I/O read and write called from Terminal class
# -------------------------------------------------------------------------
def write(self, content):
""" write content + <RETURN> """
self._ser.write(content + '\n')
self._ser.flush()
def rawwrite(self, content):
self._ser.write(content)
def read(self):
""" read a single line """
return self._ser.readline()
def read_prompt(self):
"""
reads text from the serial console (using readline) until
a match is found against the :expect: regular-expression object.
When a match is found, return a tuple(<text>,<found>) where
<text> is the complete text and <found> is the name of the
regular-expression group. If a timeout occurs, then return
the tuple(None,None).
"""
rxb = ''
mark_start = datetime.now()
mark_end = mark_start + timedelta(seconds=self.EXPECT_TIMEOUT)
while datetime.now() < mark_end:
sleep(0.1) # do not remove
line = self._ser.readline()
if not line:
continue
rxb += line
found = _PROMPT.search(rxb)
if found is not None:
break # done reading
else:
# exceeded the while loop timeout
return (None, None)
return (rxb, found.lastgroup) | unknown | codeparrot/codeparrot-clean | ||
"""basic inference routines"""
from __future__ import annotations
from collections import abc
from numbers import Number
import re
from re import Pattern
from typing import (
TYPE_CHECKING,
TypeGuard,
)
import numpy as np
from pandas._libs import lib
from pandas.util._decorators import set_module
if TYPE_CHECKING:
from collections.abc import Hashable
# Re-export the C-implemented inference predicates from pandas._libs.lib so
# that this pure-Python module is the single import point for all of them.
is_bool = lib.is_bool
is_integer = lib.is_integer
is_float = lib.is_float
is_complex = lib.is_complex
is_scalar = lib.is_scalar
is_decimal = lib.is_decimal
is_list_like = lib.is_list_like
is_iterator = lib.is_iterator
@set_module("pandas.api.types")
def is_number(obj: object) -> TypeGuard[Number | np.number]:
    """
    Check whether the object is a number.

    Both Python numbers (anything registered with :class:`numbers.Number`,
    which includes ``bool`` as an ``int`` subclass) and NumPy scalar numbers
    count.

    Parameters
    ----------
    obj : any type
        The object to check.

    Returns
    -------
    bool
        True if `obj` is a number, False otherwise.

    See Also
    --------
    api.types.is_integer: Checks a subgroup of numbers.

    Examples
    --------
    >>> from pandas.api.types import is_number
    >>> is_number(1)
    True
    >>> is_number(7.15)
    True
    >>> is_number(False)
    True
    >>> is_number("foo")
    False
    >>> is_number("5")
    False
    """
    # bool is accepted because it subclasses int (a registered Number).
    return isinstance(obj, Number) or isinstance(obj, np.number)
def iterable_not_string(obj: object) -> bool:
    """
    Check whether the object is iterable but not a plain string.

    Parameters
    ----------
    obj : The object to check.

    Returns
    -------
    is_iter_not_string : bool
        True for any non-``str`` iterable, False otherwise.

    Examples
    --------
    >>> iterable_not_string([1, 2, 3])
    True
    >>> iterable_not_string("foo")
    False
    >>> iterable_not_string(1)
    False
    """
    # str is itself iterable, so it must be ruled out explicitly.
    if isinstance(obj, str):
        return False
    return isinstance(obj, abc.Iterable)
@set_module("pandas.api.types")
def is_file_like(obj: object) -> bool:
    """
    Check whether the object is file-like.

    To be treated as file-like, the object must be iterable and expose at
    least one of the ``read`` / ``write`` methods.

    Note: file-like objects must be iterable, but
    iterable objects need not be file-like.

    Parameters
    ----------
    obj : object
        The object to check for file-like properties.

    Returns
    -------
    bool
        Whether `obj` has file-like properties.

    See Also
    --------
    api.types.is_dict_like : Check if the object is dict-like.
    api.types.is_hashable : Return True if hash(obj) will succeed, False otherwise.
    api.types.is_named_tuple : Check if the object is a named tuple.
    api.types.is_iterator : Check if the object is an iterator.

    Examples
    --------
    >>> import io
    >>> from pandas.api.types import is_file_like
    >>> buffer = io.StringIO("data")
    >>> is_file_like(buffer)
    True
    >>> is_file_like([1, 2, 3])
    False
    """
    # Either read or write is enough; iterability is required on top.
    has_io_method = hasattr(obj, "read") or hasattr(obj, "write")
    return has_io_method and hasattr(obj, "__iter__")
@set_module("pandas.api.types")
def is_re(obj: object) -> TypeGuard[Pattern]:
    """
    Check whether the object is a compiled regex pattern.

    Parameters
    ----------
    obj : object
        The object to check, typically something expected to be the result
        of ``re.compile``.

    Returns
    -------
    bool
        Whether `obj` is a regex pattern.

    See Also
    --------
    api.types.is_float : Return True if given object is float.
    api.types.is_iterator : Check if the object is an iterator.
    api.types.is_integer : Return True if given object is integer.
    api.types.is_re_compilable : Check if the object can be compiled
        into a regex pattern instance.

    Examples
    --------
    >>> from pandas.api.types import is_re
    >>> import re
    >>> is_re(re.compile(".*"))
    True
    >>> is_re("foo")
    False
    """
    # re.Pattern is the same class as typing's re-exported Pattern.
    return isinstance(obj, re.Pattern)
@set_module("pandas.api.types")
def is_re_compilable(obj: object) -> bool:
    """
    Check whether the object can be compiled into a regex pattern.

    Parameters
    ----------
    obj : The object to check
        The object to attempt to compile as a regular expression.

    Returns
    -------
    bool
        Whether `obj` can be compiled as a regex pattern.

    See Also
    --------
    api.types.is_re : Check if the object is a regex pattern instance.

    Examples
    --------
    >>> from pandas.api.types import is_re_compilable
    >>> is_re_compilable(".*")
    True
    >>> is_re_compilable(1)
    False
    """
    # EAFP: only a TypeError signals a non-compilable object here.
    try:
        re.compile(obj)  # type: ignore[call-overload]
    except TypeError:
        return False
    return True
@set_module("pandas.api.types")
def is_array_like(obj: object) -> bool:
    """
    Check whether the object is array-like.

    An object is considered array-like when it is list-like and carries a
    ``dtype`` attribute.

    Parameters
    ----------
    obj : The object to check

    Returns
    -------
    is_array_like : bool
        Whether `obj` has array-like properties.

    Examples
    --------
    >>> is_array_like(np.array([1, 2, 3]))
    True
    >>> is_array_like(pd.Series(["a", "b"]))
    True
    >>> is_array_like(pd.Index(["2016-01-01"]))
    True
    >>> is_array_like([1, 2, 3])
    False
    >>> is_array_like(("a", "b"))
    False
    """
    # Guard clause keeps the cheap list-like check first, as before.
    if not is_list_like(obj):
        return False
    return hasattr(obj, "dtype")
def is_nested_list_like(obj: object) -> bool:
    """
    Check whether the object is list-like and every element is too.

    Parameters
    ----------
    obj : The object to check

    Returns
    -------
    is_list_like : bool
        Whether `obj` has list-like properties.

    Examples
    --------
    >>> is_nested_list_like([[1, 2, 3]])
    True
    >>> is_nested_list_like([{1, 2, 3}, {1, 2, 3}])
    True
    >>> is_nested_list_like(["foo"])
    False
    >>> is_nested_list_like([])
    False
    >>> is_nested_list_like([[1, 2, 3], 1])
    False

    Notes
    -----
    This won't reliably detect whether a consumable iterator (e. g.
    a generator) is a nested-list-like without consuming the iterator.
    To avoid consuming it, we always return False if the outer container
    doesn't define `__len__`.

    See Also
    --------
    is_list_like
    """
    # Requiring __len__ avoids consuming one-shot iterators just to inspect
    # their elements.
    if not is_list_like(obj) or not hasattr(obj, "__len__"):
        return False
    if len(obj) == 0:  # pyright: ignore[reportArgumentType]
        return False
    return all(is_list_like(item) for item in obj)  # type: ignore[attr-defined]
@set_module("pandas.api.types")
def is_dict_like(obj: object) -> bool:
    """
    Check whether the object behaves like a dictionary.

    Parameters
    ----------
    obj : object
        The object to check.

    Returns
    -------
    bool
        Whether `obj` has dict-like properties.

    See Also
    --------
    api.types.is_list_like : Check if the object is list-like.
    api.types.is_file_like : Check if the object is a file-like.
    api.types.is_named_tuple : Check if the object is a named tuple.

    Examples
    --------
    >>> from pandas.api.types import is_dict_like
    >>> is_dict_like({1: 2})
    True
    >>> is_dict_like([1, 2, 3])
    False
    >>> is_dict_like(dict)
    False
    >>> is_dict_like(dict())
    True
    """
    # GH 25196: classes themselves (e.g. ``dict``) are excluded; only
    # instances count as dict-like.
    if isinstance(obj, type):
        return False
    return (
        hasattr(obj, "__getitem__")
        and hasattr(obj, "keys")
        and hasattr(obj, "__contains__")
    )
@set_module("pandas.api.types")
def is_named_tuple(obj: object) -> bool:
    """
    Check whether the object is a named tuple.

    Parameters
    ----------
    obj : object
        The object to check.

    Returns
    -------
    bool
        Whether `obj` is a named tuple.

    See Also
    --------
    api.types.is_dict_like: Check if the object is dict-like.
    api.types.is_hashable: Return True if hash(obj)
        will succeed, False otherwise.
    api.types.is_categorical_dtype : Check if the dtype is categorical.

    Examples
    --------
    >>> from collections import namedtuple
    >>> from pandas.api.types import is_named_tuple
    >>> Point = namedtuple("Point", ["x", "y"])
    >>> p = Point(1, 2)
    >>>
    >>> is_named_tuple(p)
    True
    >>> is_named_tuple((1, 2))
    False
    """
    # Named tuples are the only tuples that carry a ``_fields`` attribute.
    if not isinstance(obj, abc.Sequence):
        return False
    return hasattr(obj, "_fields")
@set_module("pandas.api.types")
def is_hashable(obj: object, allow_slice: bool = True) -> TypeGuard[Hashable]:
    """
    Return True if hash(obj) will succeed, False otherwise.

    Some types will pass a test against collections.abc.Hashable but fail when
    they are actually hashed with hash().

    Distinguish between these and other types by trying the call to hash() and
    seeing if they raise TypeError.

    Parameters
    ----------
    obj : object
        The object to check for hashability. Any Python object can be passed here.
    allow_slice : bool
        If True, return True if the object is hashable (including slices).
        If False, return True if the object is hashable and not a slice.

    Returns
    -------
    bool
        True if object can be hashed (i.e., does not raise TypeError when
        passed to hash()) and passes the slice check according to 'allow_slice'.
        False otherwise (e.g., if object is mutable like a list or dictionary
        or if allow_slice is False and object is a slice or contains a slice).

    See Also
    --------
    api.types.is_float : Return True if given object is float.
    api.types.is_iterator : Check if the object is an iterator.
    api.types.is_list_like : Check if the object is list-like.
    api.types.is_dict_like : Check if the object is dict-like.

    Examples
    --------
    >>> import collections
    >>> from pandas.api.types import is_hashable
    >>> a = ([],)
    >>> isinstance(a, collections.abc.Hashable)
    True
    >>> is_hashable(a)
    False
    """
    # Unfortunately, we can't use isinstance(obj, collections.abc.Hashable),
    # which can be faster than calling hash. That is because numpy scalars
    # fail this test.
    # Reconsider this decision once this numpy bug is fixed:
    # https://github.com/numpy/numpy/issues/5562
    if allow_slice is False:
        contains_slice = isinstance(obj, slice) or (
            isinstance(obj, tuple) and any(isinstance(item, slice) for item in obj)
        )
        if contains_slice:
            return False
    try:
        hash(obj)
    except TypeError:
        return False
    return True
def is_sequence(obj: object) -> bool:
    """
    Check if the object is a sequence of objects.
    String types are not included as sequences here.

    Parameters
    ----------
    obj : The object to check

    Returns
    -------
    is_sequence : bool
        Whether `obj` is a sequence of objects.

    Examples
    --------
    >>> l = [1, 2, 3]
    >>>
    >>> is_sequence(l)
    True
    >>> is_sequence(iter(l))
    False
    """
    try:
        iter(obj)  # must be iterable  # type: ignore[call-overload]
        len(obj)  # must expose a length  # type: ignore[arg-type]
    except (TypeError, AttributeError):
        return False
    # Strings and bytes satisfy both checks but are excluded by contract.
    return not isinstance(obj, (str, bytes))
def is_dataclass(item: object) -> bool:
    """
    Checks if the object is a data-class instance

    Parameters
    ----------
    item : object

    Returns
    --------
    is_dataclass : bool
        True if the item is an instance of a data-class,
        will return false if you pass the data class itself

    Examples
    --------
    >>> from dataclasses import dataclass
    >>> @dataclass
    ... class Point:
    ...     x: int
    ...     y: int

    >>> is_dataclass(Point)
    False
    >>> is_dataclass(Point(0, 2))
    True
    """
    try:
        import dataclasses
    except ImportError:
        return False
    # A dataclass *instance* qualifies; the class object itself does not.
    return dataclasses.is_dataclass(item) and not isinstance(item, type)
from typing import Optional, List
import logging
import os
import sqlite3
import tempfile
from time import time
class Storage(object):
    """Creates (or falls back gracefully from) the sqlite database used to
    persist bandwidth data between runs of theonionbox.

    The database is placed in the system temp directory as
    ``.theonionbox.persist`` (anonymous) or ``.theonionbox.<user>``.
    """

    # Filesystem path of the database, or None if creation failed entirely.
    path = None

    def __init__(self, path: Optional[str] = None, user: Optional[str] = None) -> None:
        """Create the persistence database and its schema.

        Parameters
        ----------
        path : directory to place the database in; defaults to the temp dir.
        user : optional user name used to derive the database file name.
        """
        log = logging.getLogger('theonionbox')
        self.path = None

        if path is None:
            path = tempfile.gettempdir()
            log.debug("Temp directory identified as {}.".format(path))

        path = os.path.abspath(path)

        if user is None or user == '':
            path = os.path.join(path, '.theonionbox.persist')
        else:
            path = os.path.join(path, '.theonionbox.{}'.format(user))

        # First attempt: on-disk database. On failure retry once in-memory.
        attempts = 0
        while attempts < 2:
            try:
                with sqlite3.connect(path) as conn:
                    sql = "CREATE TABLE IF NOT EXISTS nodes (fp string PRIMARY KEY NOT NULL UNIQUE );"
                    # The UNIQUE constraint ensures that there's always only one record per interval
                    sql += """CREATE TABLE IF NOT EXISTS bandwidth (fp int,
                                                                    interval text(2),
                                                                    timestamp int,
                                                                    read int,
                                                                    write int,
                                                                    UNIQUE (fp, interval, timestamp)
                                                                        ON CONFLICT REPLACE
                                                                    );"""
                    conn.executescript(sql)
                    # NOTE: 'notice' is a custom log level theonionbox adds to
                    # the logging module -- TODO confirm it is registered
                    # before Storage is instantiated.
                    log.notice("Persistance data will be written to '{}'.".format(path))
                    self.path = path
                    return
            except Exception:
                # Narrowed from a bare 'except:' so that e.g. KeyboardInterrupt
                # is no longer swallowed here.
                log.notice("Failed to create persistance database @ '{}'.".format(path))
                path = ':memory:'
                attempts += 1

        # At this point no persistence db could be created.
        # That's unfortunate - yet we have to carry on without persistence.
        self.path = None

    def get_path(self) -> Optional[str]:
        # None when database creation failed on both attempts.
        return self.path
class BandwidthPersistor(object):
    """Reads and writes bandwidth records for one relay (identified by its
    fingerprint) from/to the database created by ``Storage``.

    On any initialisation failure the instance stays inert: ``self.fpid``
    remains None, ``persist`` returns False and ``get`` returns None.
    """

    def __init__(self, storage: "Storage", fingerprint: str):
        """Register ``fingerprint`` in the nodes table and cache its row id."""
        self.path = None
        self.fp = None
        self.fpid = None

        log = logging.getLogger('theonionbox')

        if len(fingerprint) == 0:
            log.debug('Skipped registration for persistance of node with fingerprint of length = 0.')
            return

        path = storage.get_path()
        if path is None:
            return

        conn = self.open_connection(path)
        if conn is None:
            return

        try:
            # register this fingerprint
            try:
                with conn:
                    conn.execute("INSERT OR IGNORE INTO nodes(fp) VALUES(?);", (fingerprint,))
            except Exception as exc:
                log.warning('Failed to register {}... for persistance. {}'.format(fingerprint[:6], exc))
                return

            try:
                cur = conn.cursor()
                cur.execute("SELECT ROWID as id FROM nodes WHERE fp=?", (fingerprint,))
                r = cur.fetchone()
                fpid = r['id']
            except Exception:
                # Best effort: leave the instance unregistered.
                return

            # Successful registration: remember path, fingerprint and row id.
            if fpid is not None:
                self.path = path
                self.fp = fingerprint
                self.fpid = fpid
        finally:
            # Previously the connection leaked on the early-return paths.
            conn.close()

    def open_connection(self, path: Optional[str] = None) -> Optional[sqlite3.Connection]:
        """Open a connection to the database; returns None on failure."""
        if path is None:
            path = self.path
        if path is not None:
            try:
                conn = sqlite3.connect(path)
                conn.row_factory = sqlite3.Row
                return conn
            except Exception:
                log = logging.getLogger('theonionbox')
                log.warning('Failed to open connection to storage @ {}.'.format(path))
        return None

    # When a connection is passed in, this does not commit - the caller must!
    def persist(self, interval: str, timestamp: float,
                read: Optional[int] = 0, write: Optional[int] = 0, connection: Optional[sqlite3.Connection] = None) -> bool:
        """Insert one bandwidth record; returns True on success.

        If no connection is supplied, one is opened, committed and closed
        here (previously the internally opened connection was never
        committed, silently dropping the record).
        """
        if self.fpid is None:
            return False

        own_connection = connection is None
        if own_connection:
            connection = self.open_connection()
            if connection is None:
                return False

        try:
            connection.execute("INSERT INTO bandwidth(fp, interval, timestamp, read, write) VALUES(?, ?, ?, ?, ?)",
                               (self.fpid, interval, timestamp, read, write))
        except Exception as e:
            log = logging.getLogger('theonionbox')
            log.warning(f'Failed to persist bandwidth data for fingerprint {self.fp[:6]}: {e}')
            if own_connection:
                connection.close()
            return False

        if own_connection:
            connection.commit()
            connection.close()
        return True

    # get the data back from the table
    def get(self, interval: str, js_timestamp: Optional[int] = None, limit: Optional[int] = -1,
            offset: Optional[int] = 0, connection: Optional[sqlite3.Connection] = None) -> Optional[List[sqlite3.Row]]:
        """Return persisted records for ``interval``, newest first.

        ``js_timestamp`` defaults to the current time in JavaScript
        milliseconds, computed *per call*. (The previous default
        ``int(time() * 1000)`` was evaluated once at import time, freezing
        the timestamp for the life of the process.)
        """
        if js_timestamp is None:
            js_timestamp = int(time() * 1000)

        own_connection = connection is None
        if own_connection:
            connection = self.open_connection()
            if connection is None:
                return None

        # some SELECT magic to eliminate the need for later manipulation
        sql = """
            SELECT
                :jsts as 's',
                timestamp * 1000 as 'm',
                read as 'r',
                write as 'w'
            FROM bandwidth
            WHERE fp = :fp AND interval = :interval
            ORDER BY timestamp DESC
            LIMIT :limit OFFSET :offset
        """

        try:
            cur = connection.cursor()
            cur.execute(sql, {'jsts': js_timestamp,
                              'fp': self.fpid,
                              'interval': interval,
                              'limit': limit,
                              'offset': offset}
                        )
            res = cur.fetchall()
        except Exception as e:
            log = logging.getLogger('theonionbox')
            log.warning('Failed to get persisted data: {}'.format(e))
            res = None
        finally:
            if own_connection:
                connection.close()

        return res
"""quodsite URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.10/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf import settings
from django.conf.urls import include, url
from django.conf.urls.static import static
from django.contrib import admin, auth
from django.views import defaults as default_views
from django.views.generic import TemplateView
from wagtail.wagtailadmin import urls as wagtailadmin_urls
from wagtail.wagtaildocs import urls as wagtaildocs_urls
from wagtail.wagtailcore import urls as wagtail_urls
# Serve uploaded media first, then the project routes. The empty pattern at
# the end hands everything not matched above to Wagtail's page-serving
# machinery, so it must stay last.
urlpatterns = static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT) + [
    # url(r'^$', TemplateView.as_view(template_name='pages/home.html'), name='home'),
    # url(r'^about/$', TemplateView.as_view(template_name='pages/about.html'), name='about'),
    url(settings.ADMIN_URL, include(admin.site.urls)),  # default=r'^admin/'
    url('^auth/', include('django.contrib.auth.urls')),  # Removed namespace="users" because causes more issues
    url(r'^cms/', include(wagtailadmin_urls)),  # Wagtail admin interface
    url(r'^documents/', include(wagtaildocs_urls)),  # Wagtail document downloads
    url(r'', include(wagtail_urls)),  # catch-all: Wagtail page tree
]
if settings.DEBUG:
    # This allows the error pages to be debugged during development, just visit
    # these url in browser to see how these error pages look like.
    # They are prepended so they take precedence over the catch-all route.
    urlpatterns = [
        url(r'^400/$', default_views.bad_request, kwargs={'exception': Exception('Bad Request!')}),
        url(r'^403/$', default_views.permission_denied, kwargs={'exception': Exception('Permission Denied')}),
        url(r'^404/$', default_views.page_not_found, kwargs={'exception': Exception('Page not Found')}),
        url(r'^500/$', default_views.server_error),  # server_error takes no 'exception' kwarg
    ] + urlpatterns
/*
* Generic implementation of background process infrastructure.
*/
#include "git-compat-util.h"
#include "sub-process.h"
#include "sigchain.h"
#include "pkt-line.h"
/*
 * Hashmap comparison callback for subprocess entries: two entries are equal
 * when they were launched with the same command string. Returns the usual
 * strcmp()-style <0/0/>0 ordering.
 */
int cmd2process_cmp(const void *cmp_data UNUSED,
		    const struct hashmap_entry *eptr,
		    const struct hashmap_entry *entry_or_key,
		    const void *keydata UNUSED)
{
	const struct subprocess_entry *e1, *e2;

	/* Recover the containing entries from their embedded hashmap nodes. */
	e1 = container_of(eptr, const struct subprocess_entry, ent);
	e2 = container_of(entry_or_key, const struct subprocess_entry, ent);

	return strcmp(e1->cmd, e2->cmd);
}
/* Look up a running subprocess by its command string; NULL if not found. */
struct subprocess_entry *subprocess_find_entry(struct hashmap *hashmap, const char *cmd)
{
	struct subprocess_entry key;

	/* Only the hash and cmd need to be set up to probe the map. */
	hashmap_entry_init(&key.ent, strhash(cmd));
	key.cmd = cmd;
	return hashmap_get_entry(hashmap, &key, ent, NULL);
}
/*
 * Read pkt-lines from fd until a flush packet (or read error) and copy the
 * value of the last "status=<foo>" line seen into *status. Returns the
 * negative packet_read_line_gently() result on error, 0 otherwise.
 */
int subprocess_read_status(int fd, struct strbuf *status)
{
	int len;

	for (;;) {
		char *line;
		const char *value;

		len = packet_read_line_gently(fd, NULL, &line);
		/* !line means a flush packet terminated the response. */
		if ((len < 0) || !line)
			break;
		if (skip_prefix(line, "status=", &value)) {
			/* the last "status=<foo>" line wins */
			strbuf_reset(status);
			strbuf_addstr(status, value);
		}
	}

	return (len < 0) ? len : 0;
}
/*
 * Terminate a subprocess, wait for it to exit, and remove it from the map
 * of running subprocesses. Safe to call with entry == NULL.
 */
void subprocess_stop(struct hashmap *hashmap, struct subprocess_entry *entry)
{
	if (!entry)
		return;

	/* We kill it ourselves below; no need for the atexit cleanup. */
	entry->process.clean_on_exit = 0;
	kill(entry->process.pid, SIGTERM);
	finish_command(&entry->process);

	hashmap_remove(hashmap, &entry->ent, NULL);
}
/*
 * clean_on_exit handler: ask the subprocess to shut down by closing its
 * pipes, then wait for it. SIGPIPE is ignored so a subprocess that already
 * died does not take us down with it.
 */
static void subprocess_exit_handler(struct child_process *process)
{
	sigchain_push(SIGPIPE, SIG_IGN);
	/* Closing the pipe signals the subprocess to initiate a shutdown. */
	close(process->in);
	close(process->out);
	sigchain_pop(SIGPIPE);
	/* Finish command will wait until the shutdown is complete. */
	finish_command(process);
}
/*
 * Launch a long-running subprocess for `cmd`, run the caller-supplied
 * initialization/handshake callback `startfn`, and on success register the
 * entry in `hashmap`. Returns 0 on success, non-zero otherwise (the
 * subprocess is stopped again if startfn fails).
 */
int subprocess_start(struct hashmap *hashmap, struct subprocess_entry *entry, const char *cmd,
	subprocess_start_fn startfn)
{
	int err;
	struct child_process *process;

	entry->cmd = cmd;
	process = &entry->process;

	child_process_init(process);
	strvec_push(&process->args, cmd);
	process->use_shell = 1;
	/* Request pipes for the child's stdin/stdout. */
	process->in = -1;
	process->out = -1;
	/* Make sure the child is shut down when we exit. */
	process->clean_on_exit = 1;
	process->clean_on_exit_handler = subprocess_exit_handler;
	process->trace2_child_class = "subprocess";

	err = start_command(process);
	if (err) {
		error("cannot fork to run subprocess '%s'", cmd);
		return err;
	}

	hashmap_entry_init(&entry->ent, strhash(cmd));

	/* Let the caller perform its protocol handshake with the child. */
	err = startfn(entry);
	if (err) {
		error("initialization for subprocess '%s' failed", cmd);
		subprocess_stop(hashmap, entry);
		return err;
	}

	hashmap_add(hashmap, &entry->ent);
	return 0;
}
/*
 * Version negotiation half of the handshake: announce ourselves as
 * "<welcome_prefix>-client", offer every version in the zero-terminated
 * `versions` array, then verify the server greeting and that the version it
 * picked (stored in *chosen_version, if non-NULL) is one we offered.
 * Returns 0 on success, -1 (via error()) otherwise.
 */
static int handshake_version(struct child_process *process,
			     const char *welcome_prefix, int *versions,
			     int *chosen_version)
{
	int version_scratch;
	int i;
	char *line;
	const char *p;

	/* Allow callers that don't care which version was chosen. */
	if (!chosen_version)
		chosen_version = &version_scratch;

	if (packet_write_fmt_gently(process->in, "%s-client\n",
				    welcome_prefix))
		return error("Could not write client identification");
	for (i = 0; versions[i]; i++) {
		if (packet_write_fmt_gently(process->in, "version=%d\n",
					    versions[i]))
			return error("Could not write requested version");
	}
	if (packet_flush_gently(process->in))
		return error("Could not write flush packet");

	/* Expect "<welcome_prefix>-server" ... */
	if (!(line = packet_read_line(process->out, NULL)) ||
	    !skip_prefix(line, welcome_prefix, &p) ||
	    strcmp(p, "-server"))
		return error("Unexpected line '%s', expected %s-server",
			     line ? line : "<flush packet>", welcome_prefix);
	/* ... then "version=<n>" ... */
	if (!(line = packet_read_line(process->out, NULL)) ||
	    !skip_prefix(line, "version=", &p) ||
	    strtol_i(p, 10, chosen_version))
		return error("Unexpected line '%s', expected version",
			     line ? line : "<flush packet>");
	/* ... then a flush packet. */
	if ((line = packet_read_line(process->out, NULL)))
		return error("Unexpected line '%s', expected flush", line);

	/* Check to make sure that the version received is supported */
	for (i = 0; versions[i]; i++) {
		if (versions[i] == *chosen_version)
			break;
	}
	if (!versions[i])
		return error("Version %d not supported", *chosen_version);

	return 0;
}
/*
 * Capability negotiation half of the handshake: advertise every capability
 * in the NULL-name-terminated `capabilities` array, then read the server's
 * "capability=" lines. Each one we know is recorded in
 * *supported_capabilities (if non-NULL); an unknown one is fatal, since the
 * server must only pick from what we advertised. Returns 0 on success.
 */
static int handshake_capabilities(struct child_process *process,
				  struct subprocess_capability *capabilities,
				  unsigned int *supported_capabilities)
{
	int i;
	char *line;

	for (i = 0; capabilities[i].name; i++) {
		if (packet_write_fmt_gently(process->in, "capability=%s\n",
					    capabilities[i].name))
			return error("Could not write requested capability");
	}
	if (packet_flush_gently(process->in))
		return error("Could not write flush packet");

	while ((line = packet_read_line(process->out, NULL))) {
		const char *p;
		if (!skip_prefix(line, "capability=", &p))
			continue;

		/* Find the advertised capability matching the server's pick. */
		for (i = 0;
		     capabilities[i].name && strcmp(p, capabilities[i].name);
		     i++)
			;
		if (capabilities[i].name) {
			if (supported_capabilities)
				*supported_capabilities |= capabilities[i].flag;
		} else {
			die("subprocess '%s' requested unsupported capability '%s'",
			    process->args.v[0], p);
		}
	}

	return 0;
}
/*
 * Perform the full version + capability handshake with a freshly started
 * subprocess. SIGPIPE is ignored for the duration so that a subprocess
 * dying mid-handshake surfaces as a write error instead of killing us.
 * Returns 0 on success, non-zero otherwise.
 */
int subprocess_handshake(struct subprocess_entry *entry,
			 const char *welcome_prefix,
			 int *versions,
			 int *chosen_version,
			 struct subprocess_capability *capabilities,
			 unsigned int *supported_capabilities)
{
	int retval;
	struct child_process *process = &entry->process;

	sigchain_push(SIGPIPE, SIG_IGN);

	retval = handshake_version(process, welcome_prefix, versions,
				   chosen_version) ||
		 handshake_capabilities(process, capabilities,
					supported_capabilities);

	sigchain_pop(SIGPIPE);
	return retval;
}
#!/usr/bin/python
# coding=utf-8
###############################################################################
from test import CollectorTestCase
from test import get_collector_config
from test import unittest
from test import run_only
from mock import Mock
from mock import patch
from diamond.collector import Collector
from xen_collector import XENCollector
###############################################################################
def run_only_if_libvirt_is_available(func):
    """Decorator: skip the wrapped test when libvirt bindings are missing."""
    try:
        import libvirt
        available = True
    except ImportError:
        available = False
    return run_only(func, lambda: available)
class TestXENCollector(CollectorTestCase):

    def setUp(self):
        # Build a XENCollector with an empty override config and no handler.
        config = get_collector_config('XENCollector', {
        })

        self.collector = XENCollector(config, None)

    def test_import(self):
        # Sanity check: the collector class is importable.
        self.assertTrue(XENCollector)

    # Decorators apply bottom-up, so the mock arguments arrive in the order
    # publish_mock, libvirt_mock, os_mock.
    @run_only_if_libvirt_is_available
    @patch('os.statvfs')
    @patch('libvirt.openReadOnly')
    @patch.object(Collector, 'publish')
    def test_centos6(self, publish_mock, libvirt_mock, os_mock):

        # Stand-in for a libvirt domain whose info() returns
        # [state, maxMem(KiB), memory(KiB), nrVirtCpu, cpuTime(ns)].
        # Domain 0 models the hypervisor itself; 1-4 are guests.
        class info:
            def __init__(self, id):
                self.id = id

            def info(self):
                if self.id == 0:
                    return [1, 49420888L, 49420888L, 8, 911232000000000L]
                if self.id == 1:
                    return [1, 2097152L, 2097152L, 2, 310676150000000L]
                if self.id == 2:
                    return [1, 2097152L, 2097152L, 2, 100375300000000L]
                if self.id == 3:
                    return [1, 10485760L, 10485760L, 2, 335312040000000L]
                if self.id == 4:
                    return [1, 10485760L, 10485760L, 2, 351313480000000L]

        # Fake the read-only libvirt connection: host info and domain list.
        libvirt_m = Mock()
        libvirt_m.getInfo.return_value = ['x86_64', 48262, 8, 1200, 2, 1, 4, 1]
        libvirt_m.listDomainsID.return_value = [0, 2, 1, 4, 3]

        def lookupByIdMock(id):
            lookup = info(id)
            return lookup

        libvirt_m.lookupByID = lookupByIdMock
        libvirt_mock.return_value = libvirt_m

        # Fake os.statvfs so DiskFree = f_bavail * f_frsize is deterministic.
        statsvfs_mock = Mock()
        statsvfs_mock.f_bavail = 74492145
        statsvfs_mock.f_frsize = 4096
        os_mock.return_value = statsvfs_mock

        self.collector.collect()

        # Expected metrics derived from the mocked host/domain numbers above.
        metrics = {
            'TotalCores': 8.000000,
            'InstalledMem': 48262.000000,
            'MemAllocated': 24576.000000,
            'MemFree': 23686.000000,
            'DiskFree': 297968580.000000,
            'FreeCores': 0.000000,
            'AllocatedCores': 8.000000,
        }

        self.setDocExample(collector=self.collector.__class__.__name__,
                           metrics=metrics,
                           defaultpath=self.collector.config['path'])
        self.assertPublishedMany(publish_mock, metrics)
###############################################################################
# Allow running this test module directly.
if __name__ == "__main__":
    unittest.main()
# coding: utf-8
from __future__ import unicode_literals, division, absolute_import, print_function
import unittest
import os
import asn1crypto.x509
from oscrypto import asymmetric
from ocspbuilder import OCSPRequestBuilder
from ._unittest_compat import patch
patch()
tests_root = os.path.dirname(__file__)
fixtures_dir = os.path.join(tests_root, 'fixtures')
class OCSPRequestBuilderTests(unittest.TestCase):

    def test_build_basic_request(self):
        """An unsigned request round-trips through DER and carries the
        expected issuer hashes, serial number and a 16-byte nonce."""
        issuer_cert = asymmetric.load_certificate(os.path.join(fixtures_dir, 'test.crt'))
        subject_cert = asymmetric.load_certificate(os.path.join(fixtures_dir, 'test-inter.crt'))

        builder = OCSPRequestBuilder(subject_cert, issuer_cert)
        ocsp_request = builder.build()
        # Re-parse the DER output to verify the encoded structure.
        der_bytes = ocsp_request.dump()

        new_request = asn1crypto.ocsp.OCSPRequest.load(der_bytes)
        tbs_request = new_request['tbs_request']
        self.assertEqual(None, new_request['optional_signature'].native)
        self.assertEqual('v1', tbs_request['version'].native)
        self.assertEqual(None, tbs_request['requestor_name'].native)
        self.assertEqual(1, len(tbs_request['request_list']))
        request = tbs_request['request_list'][0]
        self.assertEqual('sha1', request['req_cert']['hash_algorithm']['algorithm'].native)
        self.assertEqual(issuer_cert.asn1.subject.sha1, request['req_cert']['issuer_name_hash'].native)
        self.assertEqual(issuer_cert.asn1.public_key.sha1, request['req_cert']['issuer_key_hash'].native)
        self.assertEqual(subject_cert.asn1.serial_number, request['req_cert']['serial_number'].native)
        self.assertEqual(0, len(request['single_request_extensions']))
        self.assertEqual(1, len(tbs_request['request_extensions']))
        extn = tbs_request['request_extensions'][0]
        self.assertEqual('nonce', extn['extn_id'].native)
        self.assertEqual(16, len(extn['extn_value'].parsed.native))

    def test_build_signed_request(self):
        """A signed request embeds the requestor name, an RSA/SHA-256
        signature and the supplied certificate chain."""
        issuer_cert = asymmetric.load_certificate(os.path.join(fixtures_dir, 'test.crt'))
        subject_cert = asymmetric.load_certificate(os.path.join(fixtures_dir, 'test-inter.crt'))
        requestor_cert = asymmetric.load_certificate(os.path.join(fixtures_dir, 'test-third.crt'))
        requestor_key = asymmetric.load_private_key(os.path.join(fixtures_dir, 'test-third.key'))

        builder = OCSPRequestBuilder(subject_cert, issuer_cert)
        ocsp_request = builder.build(requestor_key, requestor_cert, [subject_cert, issuer_cert])
        der_bytes = ocsp_request.dump()

        new_request = asn1crypto.ocsp.OCSPRequest.load(der_bytes)
        tbs_request = new_request['tbs_request']
        signature = new_request['optional_signature']
        self.assertEqual('sha256', signature['signature_algorithm'].hash_algo)
        self.assertEqual('rsassa_pkcs1v15', signature['signature_algorithm'].signature_algo)
        # requestor cert + the two extra certs passed to build().
        self.assertEqual(3, len(signature['certs']))
        self.assertEqual('v1', tbs_request['version'].native)
        self.assertEqual(requestor_cert.asn1.subject, tbs_request['requestor_name'].chosen)
        self.assertEqual(1, len(tbs_request['request_list']))
        request = tbs_request['request_list'][0]
        self.assertEqual('sha1', request['req_cert']['hash_algorithm']['algorithm'].native)
        self.assertEqual(issuer_cert.asn1.subject.sha1, request['req_cert']['issuer_name_hash'].native)
        self.assertEqual(issuer_cert.asn1.public_key.sha1, request['req_cert']['issuer_key_hash'].native)
        self.assertEqual(subject_cert.asn1.serial_number, request['req_cert']['serial_number'].native)
        self.assertEqual(0, len(request['single_request_extensions']))
        self.assertEqual(1, len(tbs_request['request_extensions']))
        extn = tbs_request['request_extensions'][0]
        self.assertEqual('nonce', extn['extn_id'].native)
        self.assertEqual(16, len(extn['extn_value'].parsed.native))
/* Copyright 2017 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_COMPILER_TF2XLA_GRAPH_COMPILER_H_
#define TENSORFLOW_COMPILER_TF2XLA_GRAPH_COMPILER_H_
#include "tensorflow/compiler/tf2xla/xla_compilation_device.h"
#include "tensorflow/compiler/tf2xla/xla_context.h"
#include "xla/client/local_client.h"
#include "tensorflow/core/common_runtime/device.h"
#include "tensorflow/core/common_runtime/device_mgr.h"
#include "tensorflow/core/common_runtime/function.h"
#include "tensorflow/core/framework/function.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/lib/core/status.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/mutex.h"
#include "tensorflow/core/platform/notification.h"
#include "tensorflow/core/platform/thread_annotations.h"
#include "tensorflow/core/public/version.h"
namespace tensorflow {
// GraphCompiler compiles the graph in topological order in the current
// thread. It also resolves the nondeterminism in the graph by enforcing a
// total order on all inputs to a node. This abstraction helps us create the
// same XLA computation given two structurally equivalent TensorFlow graphs.
// If a function call is visited during the graph traversal, it is then
// compiled through the xla_context into a computation and a `Call` operation
// is inserted to call into that computation.
//
// Note: GraphCompiler was created to remove our dependency to TF Executor in
// the history. There are still some todos so that we can completely decouple
// from Executor.
//
// TODO(yunxing): Remove usage of XlaCompilationDevice.
//
// TODO(yunxing): Remove the hack that wraps XlaExpression within a tensor now
// that we don't use TF Executor to pass around a tensor.
//
// TODO(yunxing): Make XlaOpkernel not a subclass of OpKernel so that it can
// handle a XlaExpression directly instead of a Tensor. This may require our own
// op registration infrastructure instead of FunctionLibraryRuntime.
class GraphCompiler {
 public:
  // All four pointers must outlive this object; GraphCompiler does not take
  // ownership of any of them (raw pointers are stored as-is — confirm with
  // callers).
  GraphCompiler(XlaCompilationDevice* device, Graph* graph,
                FunctionLibraryRuntime* flib,
                ScopedStepContainer* step_container)
      : device_(device),
        graph_(graph),
        flib_(flib),
        step_container_(step_container) {}

  // Compiles the graph. The results are written in xla_context stored in the
  // resource_manager of the 'XlaCompilationDevice' that's passed into the
  // constructor.
  absl::Status Compile();

 private:
  // Partially sets params. This partially set params can be reused
  // across multiple nodes visit.
  void PartiallySetupParams(OpKernelContext::Params* params);

  // Compiles a functional node and writes result to OpkernelContext. A
  // functional node represents a defined computation and should be compiled
  // using `compiler_`.
  absl::Status CompileFunctionalNode(Node* n, OpKernelContext* op_context);

  // Device on which the XLA op kernels are run during compilation.
  XlaCompilationDevice* device_;
  // The graph being compiled.
  Graph* graph_;
  // Used to instantiate functions called from the graph.
  FunctionLibraryRuntime* flib_;
  // Per-step resource container made available to executed kernels.
  ScopedStepContainer* step_container_;

  // A buffer to hold tensor inputs to a node, this is reused across the graph
  // traversal.
  absl::InlinedVector<TensorValue, 4> tensor_inputs_;
};
} // namespace tensorflow
#endif // TENSORFLOW_COMPILER_TF2XLA_GRAPH_COMPILER_H_ | c | github | https://github.com/tensorflow/tensorflow | tensorflow/compiler/tf2xla/graph_compiler.h |
# HttpClient configuration fixture: a global DNS override plus one scoped
# client ("foo") with a default query-string parameter and its own override.
framework:
    http_client:
        default_options:
            resolve:
                host: 127.0.0.1  # pin DNS resolution for all clients
        scoped_clients:
            foo:
                base_uri: http://example.com
                query:
                    key: foo  # default query-string parameter
                resolve:
                    host: 127.0.0.1
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
from cornice import Service
from pyramid.exceptions import HTTPNotFound
from sqlalchemy import func, distinct
from sqlalchemy.sql import or_
import math
from bodhi import log
from bodhi.models import Update, Build, Bug, CVE, Package, User, Release, Group
import bodhi.schemas
import bodhi.security
from bodhi.validators import (
validate_nvrs,
validate_uniqueness,
validate_builds,
validate_enums,
validate_updates,
validate_packages,
validate_releases,
validate_release,
validate_username,
validate_groups,
)
# Read-only Cornice services exposing Koji build information. CORS is
# restricted to the read-only origins defined in bodhi.security.
build = Service(name='build', path='/builds/{nvr}',
                description='Koji builds',
                cors_origins=bodhi.security.cors_origins_ro)
builds = Service(name='builds', path='/builds/',
                 description='Koji builds',
                 cors_origins=bodhi.security.cors_origins_ro)
@build.get(renderer='json')
def get_build(request):
    """Look up a single Koji build by the NVR from the URL.

    Registers a 404 error on the request when no such build exists.
    """
    nvr = request.matchdict.get('nvr')
    result = Build.get(nvr, request.db)
    if not result:
        request.errors.add('body', 'nvr', 'No such build')
        request.errors.status = HTTPNotFound.code
        return None
    return result
@builds.get(schema=bodhi.schemas.ListBuildSchema, renderer='json',
            validators=(validate_releases, validate_updates,
                        validate_packages))
def query_builds(request):
    """Search builds by nvr, associated updates, packages and/or releases.

    All filters come from the validated query parameters; results are
    paginated by ``page`` and ``rows_per_page``.
    """
    db = request.db
    data = request.validated
    query = db.query(Build)

    nvr = data.get('nvr')
    if nvr is not None:
        query = query.filter(Build.nvr==nvr)

    updates = data.get('updates')
    if updates is not None:
        query = query.join(Build.update)
        # A build matches when its update has any of the given titles/aliases.
        args = \
            [Update.title==update.title for update in updates] +\
            [Update.alias==update.alias for update in updates]
        query = query.filter(or_(*args))

    packages = data.get('packages')
    if packages is not None:
        query = query.join(Build.package)
        query = query.filter(or_(*[Package.id==p.id for p in packages]))

    releases = data.get('releases')
    if releases is not None:
        query = query.join(Build.release)
        query = query.filter(or_(*[Release.id==r.id for r in releases]))

    # We can't use ``query.count()`` here because it is naive with respect to
    # all the joins that we're doing above.
    count_query = query.with_labels().statement\
        .with_only_columns([func.count(distinct(Build.nvr))])\
        .order_by(None)
    total = db.execute(count_query).scalar()

    page = data.get('page')
    rows_per_page = data.get('rows_per_page')
    # Total page count for the client; at least the partial last page.
    pages = int(math.ceil(total / float(rows_per_page)))
    query = query.offset(rows_per_page * (page - 1)).limit(rows_per_page)

    return dict(
        builds=query.all(),
        page=page,
        pages=pages,
        rows_per_page=rows_per_page,
        total=total,
    )
#ifndef C10_UTIL_OPTIONAL_H_
#define C10_UTIL_OPTIONAL_H_
#include <optional>
#include <type_traits>
// Macros.h is not needed, but it does namespace shenanigans that lots
// of downstream code seems to rely on. Feel free to remove it and fix
// up builds.
namespace c10 {
#if !defined(FBCODE_CAFFE2) && !defined(C10_NODEPRECATED)
// NOLINTNEXTLINE(misc-unused-using-decls)
using std::bad_optional_access;
// NOLINTNEXTLINE(misc-unused-using-decls)
using std::make_optional;
// NOLINTNEXTLINE(misc-unused-using-decls)
using std::nullopt;
// NOLINTNEXTLINE(misc-unused-using-decls)
using std::nullopt_t;
// NOLINTNEXTLINE(misc-unused-using-decls)
using std::optional;
#endif
#if !defined(FBCODE_CAFFE2) && !defined(C10_NODEPRECATED)
namespace detail_ {
// The call to convert<A>(b) has return type A and converts b to type A iff
// decltype(b) is implicitly convertible to A.
template <class U>
constexpr U convert(U v) {
  return v;
}
} // namespace detail_
// Deprecated shim kept for backward compatibility: returns *v when the
// optional is engaged, otherwise the result of invoking `func` (which must
// return something convertible to T).
template <class T, class F>
[[deprecated(
    "Please use std::optional::value_or instead of c10::value_or_else")]] constexpr T
value_or_else(const std::optional<T>& v, F&& func) {
  static_assert(
      std::is_convertible_v<typename std::invoke_result_t<F>, T>,
      "func parameters must be a callable that returns a type convertible to the value stored in the optional");
  return v.has_value() ? *v : detail_::convert<T>(std::forward<F>(func)());
}
// Rvalue overload of the deprecated shim above.
// NOTE(review): `constexpr_move` and `std::optional::contained_val()` are not
// standard names — this template looks unbuildable if ever instantiated;
// confirm whether any caller still reaches it before relying on it.
template <class T, class F>
[[deprecated(
    "Please use std::optional::value_or instead of c10::value_or_else")]] constexpr T
value_or_else(std::optional<T>&& v, F&& func) {
  static_assert(
      std::is_convertible_v<typename std::invoke_result_t<F>, T>,
      "func parameters must be a callable that returns a type convertible to the value stored in the optional");
  return v.has_value() ? constexpr_move(std::move(v).contained_val())
                       : detail_::convert<T>(std::forward<F>(func)());
}
#endif
} // namespace c10
#endif // C10_UTIL_OPTIONAL_H_ | c | github | https://github.com/pytorch/pytorch | c10/util/Optional.h |
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Utility to decode a crash dump generated by untrusted_crash_dump.[ch]
Currently this produces a simple stack trace.
"""
import argparse
import json
import os
import posixpath
import subprocess
import sys
class CoreDecoder(object):
  """Class to process core dumps produced by untrusted_crash_dump.[ch]."""

  def __init__(self, main_nexe, nmf_filename,
               addr2line, library_paths, platform):
    """Construct an object to process core dumps.

    Args:
      main_nexe: nexe to resolve NaClMain references from.
      nmf_filename: nmf to resolve references from ('-' means no manifest).
      addr2line: path to appropriate addr2line.
      library_paths: list of paths to search for libraries.
      platform: platform string to use in nmf files.
    """
    self.main_nexe = main_nexe
    self.nmf_filename = nmf_filename
    if nmf_filename == '-':
      self.nmf_data = {}
    else:
      # Use a context manager so the manifest file handle is not leaked.
      with open(nmf_filename) as nmf_file:
        self.nmf_data = json.load(nmf_file)
    self.addr2line = addr2line
    self.library_paths = library_paths
    self.platform = platform

  def _SelectModulePath(self, filename):
    """Select which path to get a module from.

    Args:
      filename: filename of a module (as appears in phdrs).
    Returns:
      Full local path to the file, derived by consulting the manifest.
    """
    # For some names try the main nexe.
    # NaClMain is the argv[0] setup in sel_main.c
    # (null) shows up in chrome.
    if self.main_nexe is not None and filename in ['NaClMain', '(null)']:
      return self.main_nexe
    filepart = posixpath.basename(filename)
    nmf_entry = self.nmf_data.get('files', {}).get(filepart, {})
    nmf_url = nmf_entry.get(self.platform, {}).get('url')
    # Try filename directly if not in manifest.
    if nmf_url is None:
      return filename
    # Look for the module relative to the manifest (if any),
    # then in other search paths.
    paths = []
    if self.nmf_filename != '-':
      paths.append(os.path.dirname(self.nmf_filename))
    paths.extend(self.library_paths)
    for path in paths:
      pfilename = os.path.join(path, nmf_url)
      if os.path.exists(pfilename):
        return pfilename
    # If nothing else, try the path directly.
    return filename

  def _DecodeAddressSegment(self, segments, address):
    """Convert an address to a segment relative one, plus filename.

    Args:
      segments: a list of phdr segments.
      address: a process wide code address.
    Returns:
      A tuple of filename and segment relative address.
    """
    for segment in segments:
      for phdr in segment['dlpi_phdr']:
        start = segment['dlpi_addr'] + phdr['p_vaddr']
        end = start + phdr['p_memsz']
        if address >= start and address < end:
          return (segment['dlpi_name'], address - segment['dlpi_addr'])
    return ('(null)', address)

  def _Addr2Line(self, segments, address):
    """Use addr2line to decode a code address.

    Args:
      segments: A list of phdr segments.
      address: a code address.
    Returns:
      A list of dicts containing: function, filename, lineno.
    """
    filename, address = self._DecodeAddressSegment(segments, address)
    filename = self._SelectModulePath(filename)
    if not os.path.exists(filename):
      return [{
          'function': 'Unknown_function',
          'filename': 'unknown_file',
          'lineno': -1,
      }]
    # Use address - 1 to get the call site instead of the line after.
    address -= 1
    cmd = [
        self.addr2line, '-f', '--inlines', '-e', filename, '0x%08x' % address,
    ]
    process = subprocess.Popen(cmd, stdout=subprocess.PIPE)
    process_stdout, _ = process.communicate()
    assert process.returncode == 0
    # addr2line -f emits pairs of lines: function name, then "file:line".
    # NOTE(review): assumes text (str) output -- Python 2 semantics.
    lines = process_stdout.splitlines()
    assert len(lines) % 2 == 0
    results = []
    # range(... // 2) works on both Python 2 and 3 (xrange(len/2) did not).
    for index in range(len(lines) // 2):
      func = lines[index * 2]
      afilename, lineno = lines[index * 2 + 1].split(':', 1)
      results.append({
          'function': func,
          'filename': afilename,
          'lineno': int(lineno),
      })
    return results

  def Decode(self, text):
    """Decode a core dump given as a JSON string; see LoadAndDecode."""
    core = json.loads(text)
    for frame in core['frames']:
      frame['scopes'] = self._Addr2Line(core['segments'], frame['prog_ctr'])
    return core

  def LoadAndDecode(self, core_path):
    """Given a core.json file, load and embellish with decoded addresses.

    Args:
      core_path: source file containing a dump.
    Returns:
      An embellished core dump dict (decoded code addresses).
    """
    # Delegate to Decode so the decode loop lives in one place, and close
    # the file promptly instead of leaking the handle.
    with open(core_path) as core_file:
      return self.Decode(core_file.read())

  def StackTrace(self, info):
    """Convert a decoded core.json dump to a simple stack trace.

    Args:
      info: core.json info with decoded code addresses.
    Returns:
      A list of dicts with filename, lineno, function (deepest first).
    """
    trace = []
    for frame in info['frames']:
      for scope in frame['scopes']:
        trace.append(scope)
    return trace

  def PrintTrace(self, trace, out):
    """Print a trace to a file like object.

    Args:
      trace: A list of dicts with function/filename/lineno (deepest first).
      out: file like object to output the trace to.
    """
    for scope in trace:
      out.write('%s at %s:%d\n' % (
          scope['function'],
          scope['filename'],
          scope['lineno']))
def PrintTrace(self, trace, out):
"""Print a trace to a file like object.
Args:
trace: A list of [filename, lineno, function] (deepest first).
out: file like object to output the trace to.
"""
for scope in trace:
out.write('%s at %s:%d\n' % (
scope['function'],
scope['filename'],
scope['lineno']))
def main(args):
  """Decode a core.json crash dump and print its stack trace to stdout.

  Args:
    args: command line arguments (without the program name).
  Returns:
    Process exit code (0 on success).
  """
  parser = argparse.ArgumentParser(description=__doc__)
  parser.add_argument('-m', '--main-nexe',
                      help='nexe to resolve NaClMain references from')
  parser.add_argument('-n', '--nmf', default='-',
                      help='nmf to resolve references from')
  parser.add_argument('-a', '--addr2line',
                      help='path to appropriate addr2line')
  parser.add_argument('-L', '--library-path', dest='library_paths',
                      action='append', default=[],
                      help='path to search for shared libraries')
  parser.add_argument('-p', '--platform',
                      help='platform in a style match nmf files')
  parser.add_argument('core_json')
  opts = parser.parse_args(args)
  decoder = CoreDecoder(main_nexe=opts.main_nexe,
                        nmf_filename=opts.nmf,
                        addr2line=opts.addr2line,
                        library_paths=opts.library_paths,
                        platform=opts.platform)
  # Load, decode, flatten and print in one pipeline.
  decoder.PrintTrace(
      decoder.StackTrace(decoder.LoadAndDecode(opts.core_json)), sys.stdout)
  return 0
if __name__ == '__main__':
sys.exit(main(sys.argv[1:])) | unknown | codeparrot/codeparrot-clean | ||
/* Copyright 2015 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_CORE_FRAMEWORK_VERSIONS_H_
#define TENSORFLOW_CORE_FRAMEWORK_VERSIONS_H_
#include "tensorflow/core/lib/core/status.h"
namespace tensorflow {
class VersionDef;
// Check whether data with the given versions is compatible with the given
// consumer and min producer. upper_name and lower_name are used to form
// error messages upon failure. Example usage:
//
// #include "tensorflow/core/public/version.h"
//
// TF_RETURN_IF_ERROR(CheckVersions(versions, TF_GRAPH_DEF_VERSION,
// TF_GRAPH_DEF_VERSION_MIN_PRODUCER,
// "GraphDef", "graph"));
absl::Status CheckVersions(const VersionDef& versions, int consumer,
int min_producer, const char* upper_name,
const char* lower_name);
} // namespace tensorflow
#endif // TENSORFLOW_CORE_FRAMEWORK_VERSIONS_H_ | c | github | https://github.com/tensorflow/tensorflow | tensorflow/core/framework/versions.h |
#! /usr/bin/env python
import os
import itertools as it
import sys
import textwrap
#import gtk
import numpy as np
import sympy as sy
import sympy.stats
import odespy as ode
import matplotlib
import matplotlib.pyplot as plt
import sympy.physics.mechanics as mech
"""
Pretty plotting code.
"""
_all_spines = ["top", "right", "bottom", "left"]
def hide_spines(s=["top", "right"]):
"""Hides the top and rightmost axis spines from view for all active
figures and their respective axes."""
global _all_spines
# Retrieve a list of all current figures.
figures = [x for x in matplotlib._pylab_helpers.Gcf.get_all_fig_managers()]
for figure in figures:
# Get all Axis instances related to the figure.
for ax in figure.canvas.figure.get_axes():
for spine in _all_spines :
if spine in s :
ax.spines[spine].set_color('none')
if "top" in s and "bottom" in s :
ax.xaxis.set_ticks_position('none')
elif "top" in s :
ax.xaxis.set_ticks_position('bottom')
elif "bottom" in s :
ax.xaxis.set_ticks_position('top')
else :
ax.xaxis.set_ticks_position('both')
if "left" in s and "right" in s :
ax.yaxis.set_ticks_position('none')
elif "left" in s :
ax.yaxis.set_ticks_position('right')
elif "right" in s :
ax.yaxis.set_ticks_position('left')
else :
ax.yaxis.set_ticks_position('both')
"""
FORTRAN compilation code.
"""
def find_matching_parentheses(s, popen="(", pclose=")") :
i_start = s.find(popen)
i_end = -1
count = 0
s_frame = s[i_start:]
for i in xrange(len(s_frame)) :
char = s_frame[i]
if char == popen :
count += 1
elif char == pclose :
count -= 1
if count == 0 :
i_end = i + i_start + 1
break
return i_start, i_end
def parse_merge(H, s) :
    """
    Parse the first FORTRAN merge statement found within s.

    H is the name of a hidden variable which will be used to store the value of
    the piecewise function defined by the merge statement.

    Returns (i_start, i_end, f_code): the index range of the merge statement
    inside s, and FORTRAN IF/THEN/ELSE source that assigns the selected
    branch's value to H.  Recurses when a branch is itself a merge.
    """
    # extract bracketed code in merge statement from s
    # m_statement is of form "(expr1,expr2,cond)"
    i_merge_start = s.find("merge")
    ms = s[i_merge_start:]
    i_start, i_end = find_matching_parentheses(ms)
    m_statement = ms[i_start:i_end]
    # print m_statement
    # extract expr1, expr2, and conditional
    # NOTE(review): first/last comma split assumes expr1 and cond themselves
    # contain no top-level commas -- presumably guaranteed upstream; confirm.
    i1 = m_statement.find(",")
    i2 = m_statement.rfind(",")
    expr1 = m_statement[1:i1]
    expr2 = m_statement[i1 + 1:i2]
    cond = m_statement[i2 + 1:-1]
    # if expr1, expr2, or cond are merge statements, recursively call this
    # function otherwise, set the hidden switch variable to take the value of
    # the relevant expr
    if expr1.find("merge") != -1 :
        expr1_str = parse_merge(H, expr1)[-1]
        # re-indent the nested block so it nests inside this IF branch
        expr1_str = "".join([" " + s + "\n" for s in expr1_str.splitlines()])
    else :
        expr1_str = " " + H + "=" + expr1
    if expr2.find("merge") != -1 :
        expr2_str = parse_merge(H, expr2)[-1]
        expr2_str = "".join([" " + s + "\n" for s in expr2_str.splitlines()])
    else :
        expr2_str = " " + H + "=" + expr2
    # format expr1_str, expr2_str, and cond into a correct FORTRAN IF-THEN-ELSE
    # statement
    f_code = " IF (" + cond.strip() + ") THEN \n" + expr1_str + "\n" + \
             " ELSE \n" + expr2_str + "\n" + \
             " ENDIF \n"
    return i_merge_start, i_merge_start + i_end, f_code
def FORTRAN_f(x, f, parameters=[], verbose=False) :
    """
    Produce FORTRAN function for evaluating a vector-valued SymPy expression f
    given a state vector x.

    The FORTRAN function will have the signature f_f77(neq, t, X, Y) where neq
    is hidden and Y is an output matrix.

    Args:
        x: iterable of SymPy state symbols.
        f: iterable of SymPy expressions (the vector field), same length as x.
        parameters: extra symbols appended to the state with zero dynamics.
            NOTE(review): mutable default is never mutated here, but a tuple
            default would be safer.
        verbose: print progress messages (Python 2 print syntax).

    Returns:
        FORTRAN 77 source for subroutine f_f77, as a string.
    """
    # TODO remove code for dealing with stochastic systems -- it is not used in
    # this paper
    x = list(x) + list(parameters)
    # Parameters are constant: their "dynamics" are identically zero.
    f = list(f) + [0]*len(parameters)
    # Collect any random variables appearing in the dynamics (stochastic case).
    rv = list(set((np.concatenate([sy.stats.random_symbols(f_i) for f_i in f]))))
    NR = len(rv)
    if NR > 0 :
        # Stochastic systems get hidden dt/seed pseudo-state entries.
        x += [sy.symbols("dt"), sy.symbols("seed")]
        f += [0, 0]
    NX = len(x)
    NY = len(f)
    if NX != NY :
        raise Exception("System is not square!")
    if verbose : print "generating FORTRAN matrices..."
    # Replace symbolic names with indexed accesses X(i) / R(i) (1-based).
    _X = sy.tensor.IndexedBase("X", shape=(NX, ))
    X = [_X[i + 1] for i in xrange(NX)]
    _R = sy.tensor.IndexedBase("R", shape=(NR, ))
    R = [_R[i + 1] for i in xrange(NR)]
    if type(f) != sy.Matrix : f = sy.Matrix(f)
    # WARNING : These substitution steps are VERY SLOW!!! It might be wise to
    # parallelise them in the future, or at least substitute into one dynamical
    # equation at a time so that progress can be monitored.
    if verbose : print "substituting matrix elements for original state variables and parameters (WARNING: SLOW)..."
    f_sub = f.subs(zip(x, X))
    if verbose : print "substituting matrix elements for random variables (WARNING: SLOW)..."
    f_sub = f_sub.subs(zip(rv, R))
    # generate FORTRAN code
    if verbose : print "generating FORTRAN code from dynamics equations..."
    fstrs = [sy.fcode(fi, standard=95) for fi in f_sub]
    # remove whitespace and newlines
    if verbose : print "removing whitespace and newlines..."
    fstrs = ["".join(fi.split()) for fi in fstrs]
    # remove all @ (FORTRAN line continuation indicator)
    if verbose : print "removing line continuations..."
    fstrs = [fi.replace("@", "") for fi in fstrs]
    # find FORTRAN inline merge statements and replace with a hidden "switch"
    # variable whose value is set by a full IF statement at the start of the
    # function call.
    # -- this is needed because FORTRAN77 doesn't support inline merge statements
    Hstrs = [] # to hold hidden switch expressions
    if verbose : print "formatting piecewise functions..."
    for i in xrange(len(fstrs)) :
        while fstrs[i].find("merge") != -1 :
            H = "H(" + str(len(Hstrs) + 1) + ")"
            i_merge_start, i_merge_end, Hstr = parse_merge(H, fstrs[i])
            fstrs[i] = fstrs[i][:i_merge_start] + H + fstrs[i][i_merge_end:]
            Hstrs.append(Hstr)
    NH = len(Hstrs)
    # format the fstrs
    wrapper = textwrap.TextWrapper(expand_tabs=True,
                                   replace_whitespace=True,
                                   initial_indent=" ",
                                   subsequent_indent=" @ ",
                                   width=60)
    if verbose : print "formatting state equations..."
    for i in xrange(len(fstrs)) :
        fstrs[i] = wrapper.fill("Y(" + str(i + 1) + ")=" + fstrs[i]) + "\n"
    # put the above elements together into a FORTRAN subroutine
    if verbose : print "formatting preamble..."
    hdr = " subroutine f_f77(neq, t, X, Y) \n" +\
          "Cf2py intent(hide) neq \n" +\
          "Cf2py intent(out) Y \n" +\
          " integer neq \n" +\
          " double precision t, X, Y \n" +\
          " dimension X(neq), Y(neq) \n"
    if NH > 0 : hdr += " real, dimension(" + str(NH) + ") :: H \n"
    # TODO fix the following -- assumes dt = 0.01
    # NOTE this is only important when dealing with stochastic systems
    if NR > 0 : hdr += " real, dimension(" + str(NR) + ") :: R \n" +\
                       " integer :: SEED \n" +\
                       " real :: RTRASH \n" +\
                       " SEED = INT((t/" + sy.fcode(X[-2]).strip() +\
                       ") + " + sy.fcode(X[-1]).strip() + ") \n" +\
                       " CALL SRAND(SEED) \n" +\
                       " DO i=1,4 \n" +\
                       " RTRASH=RAND(0) \n" +\
                       " END DO \n"
    R_block = "".join([sy.fcode(R_i) + "=RAND(0) \n" for R_i in R])
    H_block = "".join(Hstrs)
    Y_block = "".join(fstrs)
    if verbose : print "assembling source code blocks..."
    fcode = hdr + R_block + H_block + Y_block + " return \n" + " end \n"
    # final formatting
    if verbose : print "final source code formatting..."
    wrapper = textwrap.TextWrapper(expand_tabs=True, replace_whitespace=True,
                                   initial_indent="", subsequent_indent=" @ ", width=60)
    fcode = "".join([wrapper.fill(src) + "\n" for src in fcode.split("\n")])
    return fcode
def FORTRAN_jacobian(x, jac, parameters=[]) :
    """
    Produce FORTRAN source for subroutine jac_f77 evaluating the Jacobian
    matrix jac (a SymPy matrix) of a system with state x and constant
    parameters.  Mirrors FORTRAN_f, but emits pd(i,j) assignments.
    """
    # TODO remove this function if unused in paper
    NX = len(x)
    NP = len(parameters)
    Nrowpd = jac.shape[0]
    Ncolpd = jac.shape[1]
    # BUGFIX: the original chained comparison (NX != Nrowpd != Ncolpd) missed
    # e.g. NX == Nrowpd != Ncolpd; require all three dimensions to agree.
    if not (NX == Nrowpd == Ncolpd) :
        raise Exception("System is not square!")
    _X = sy.tensor.IndexedBase("X", shape=(NX, ))
    X = [_X[i + 1] for i in xrange(NX)]
    X = X + [_X[NX + i + 1] for i in xrange(NP)]
    # BUGFIX: the guard was inverted (== instead of !=, compare FORTRAN_f);
    # convert only when jac is NOT already a sympy Matrix.
    if type(jac) != sy.Matrix : jac = sy.Matrix(jac)
    jac_sub = jac.subs(zip(list(x) + list(parameters), X))
    # Row-major (i, j) index for each flattened Jacobian entry.
    ijs = [i for i in it.product(xrange(Nrowpd), xrange(Ncolpd))]
    # generate FORTRAN code
    fstrs = [sy.fcode(jac_ij) for jac_ij in jac_sub]
    # remove whitespace and newlines
    fstrs = ["".join(jac_ij.split()) for jac_ij in fstrs]
    # remove all @ (FORTRAN line continuation indicator)
    fstrs = [jac_ij.replace("@", "") for jac_ij in fstrs]
    # find FORTRAN inline merge statements and replace with a hidden "switch"
    # variable whose value is set by a full IF statement at the start of the
    # function call.
    # -- this is needed because FORTRAN77 doesn't support inline merge statements
    Hstrs = [] # to hold hidden switch expressions
    for i in xrange(len(fstrs)) :
        while fstrs[i].find("merge") != -1 :
            H = "H(" + str(len(Hstrs) + 1) + ")"
            i_merge_start, i_merge_end, Hstr = parse_merge(H, fstrs[i])
            fstrs[i] = fstrs[i][:i_merge_start] + H + fstrs[i][i_merge_end:]
            Hstrs.append(Hstr)
    NH = len(Hstrs)
    # format the fstrs
    wrapper = textwrap.TextWrapper(expand_tabs=True,
                                   replace_whitespace=True,
                                   initial_indent=" ",
                                   subsequent_indent=" @ ",
                                   width=60)
    for k in xrange(len(fstrs)) :
        i, j = ijs[k]
        fstrs[k] = wrapper.fill("pd(" + str(i + 1) + "," + str(j + 1) + ")=" + fstrs[k]) + "\n"
    # put the above elements together into a FORTRAN subroutine
    hdr = " subroutine jac_f77(neq, t, X, ml, mu, pd, nrowpd) \n" +\
          "Cf2py intent(hide) neq, ml, mu, nrowpd \n" +\
          "Cf2py intent(out) pd \n" +\
          " integer neq, ml, mu, nrowpd \n" +\
          " double precision t, X, pd \n" +\
          " dimension X(neq), pd(neq, neq) \n"
    if NH > 0 : hdr += " real, dimension(" + str(NH) + ") :: H \n"
    H_block = "".join(Hstrs)
    pd_block = "".join(fstrs)
    fcode = hdr + H_block + pd_block + " return \n" + " end \n"
    return fcode
def FORTRAN_compile(fcode) :
    """Compile FORTRAN source into a callable f2py right-hand-side function."""
    compiled_f77 = ode.compile_f77(fcode)
    # Drop the shared-object artifact the f2py build leaves behind.
    os.remove("tmp_callback.so")
    # reload(ode)
    return compiled_f77
"""
Numerical integration code.
"""
def FORTRAN_integrate(t, x0, f, p0=[], jac=None, rtol=0.0001, atol=0.0001) :
    """Integrate compiled dynamics f over time grid t from state x0.

    Parameters p0 are appended to the initial condition (they evolve with
    zero dynamics, matching FORTRAN_f).  Returns the solution array.
    """
    lsodes = ode.Lsodes(f=None, f_f77=f, jac_f77=jac, rtol=rtol, atol=atol)
    lsodes.set_initial_condition(list(x0) + list(p0))
    trajectory, _ = lsodes.solve(t)
    return trajectory
import csv
import codecs
import cStringIO
from collections import OrderedDict
from itertools import chain
from django.conf import settings
from pyxform.section import Section, RepeatingSection
from pyxform.question import Question
from onadata.apps.logger.models.xform import XForm
from onadata.apps.viewer.models.data_dictionary import DataDictionary
from onadata.apps.viewer.models.parsed_instance import ParsedInstance
from onadata.libs.exceptions import NoRecordsFoundError
from onadata.libs.utils.common_tags import ID, XFORM_ID_STRING, STATUS,\
ATTACHMENTS, GEOLOCATION, UUID, SUBMISSION_TIME, NA_REP,\
BAMBOO_DATASET_ID, DELETEDAT, TAGS, NOTES, SUBMITTED_BY, VERSION,\
DURATION
from onadata.libs.utils.export_tools import question_types_to_exclude
# the bind type of select multiples that we use to compare
MULTIPLE_SELECT_BIND_TYPE = u"select"
# the bind type of geopoint questions (split into lat/lng/alt/precision)
GEOPOINT_BIND_TYPE = u"geopoint"

# column group delimiters: "group/field" (default) or "group.field"
GROUP_DELIMITER_SLASH = '/'
GROUP_DELIMITER_DOT = '.'
DEFAULT_GROUP_DELIMITER = GROUP_DELIMITER_SLASH
GROUP_DELIMITERS = [GROUP_DELIMITER_SLASH, GROUP_DELIMITER_DOT]
def remove_dups_from_list_maintain_order(l):
    """Return a copy of ``l`` with duplicates removed, keeping first-seen order."""
    seen = set()
    deduped = []
    for item in l:
        if item not in seen:
            seen.add(item)
            deduped.append(item)
    return deduped
def get_prefix_from_xpath(xpath):
    """Return the group prefix of ``xpath`` including the trailing slash.

    "a/b/c" -> "a/b/"; returns None when there is no '/' separator.
    """
    parts = str(xpath).rsplit('/', 1)
    if len(parts) == 1:
        return None
    if len(parts) == 2:
        return '%s/' % parts[0]
    # rsplit with maxsplit=1 can only yield one or two parts; kept to mirror
    # the original contract.
    raise ValueError(
        '%s cannot be prefixed, it returns %s' % (xpath, str(parts)))
class UnicodeWriter:
    """
    A CSV writer which will write rows to CSV file "f",
    which is encoded in the given encoding.

    Python 2 only: the csv module there cannot handle unicode directly, so
    rows are written UTF-8-encoded into an in-memory queue, then re-encoded
    to the target encoding and flushed to the wrapped stream.
    """
    def __init__(self, f, dialect=csv.excel, encoding="utf-8", **kwds):
        # Redirect output to a queue
        self.queue = cStringIO.StringIO()
        self.writer = csv.writer(self.queue, dialect=dialect, **kwds)
        self.stream = f
        self.encoder = codecs.getincrementalencoder(encoding)()

    def writerow(self, row):
        # Encode every cell as UTF-8 before handing it to the csv module.
        self.writer.writerow([unicode(s).encode("utf-8") for s in row])
        # Fetch UTF-8 output from the queue ...
        data = self.queue.getvalue()
        data = data.decode("utf-8")
        # ... and reencode it into the target encoding
        data = self.encoder.encode(data)
        # write to the target stream
        self.stream.write(data)
        # empty queue
        self.queue.truncate(0)

    def writerows(self, rows):
        # Write each row in turn through writerow() so encoding is uniform.
        for row in rows:
            self.writerow(row)
def write_to_csv(path, rows, columns, remove_group_name=False):
    """Write ``rows`` (dicts keyed by column xpath) as CSV to ``path``.

    Missing values are rendered with the configured NA representation; when
    ``remove_group_name`` is True, header cells drop their group prefix.
    """
    na_rep = getattr(settings, 'NA_REP', NA_REP)
    with open(path, 'wb') as csvfile:
        writer = UnicodeWriter(csvfile, lineterminator='\n')
        # Header row: optionally strip "group/" prefixes from column names.
        if remove_group_name:
            header = [col.split('/')[-1] if '/' in col else col
                      for col in columns]
        else:
            header = columns
        writer.writerow(header)
        for row in rows:
            # Drop internal bookkeeping columns before writing the row.
            for ignored in AbstractDataFrameBuilder.IGNORED_COLUMNS:
                row.pop(ignored, None)
            writer.writerow([row.get(col, na_rep) for col in columns])
class AbstractDataFrameBuilder(object):
    """Shared export machinery for the XLS, CSV and KML data-frame builders:
    loads the form, collects select-multiple/geopoint questions, and queries
    submission records."""
    # Internal bookkeeping columns stripped from exported rows.
    IGNORED_COLUMNS = [XFORM_ID_STRING, STATUS, ID, ATTACHMENTS, GEOLOCATION,
                       BAMBOO_DATASET_ID, DELETEDAT]
    # fields NOT within the form def that we want to include
    ADDITIONAL_COLUMNS = [
        UUID, SUBMISSION_TIME, TAGS, NOTES, VERSION, DURATION, SUBMITTED_BY]
    # When True, split select-multiple choices become 1/0 instead of True/False.
    BINARY_SELECT_MULTIPLES = False
    """
    Group functionality used by any DataFrameBuilder i.e. XLS, CSV and KML
    """
    def __init__(self, username, id_string, filter_query=None,
                 group_delimiter=DEFAULT_GROUP_DELIMITER,
                 split_select_multiples=True, binary_select_multiples=False,
                 start=None, end=None, remove_group_name=False):
        """Capture export options and load the XForm being exported."""
        self.username = username
        self.id_string = id_string
        self.filter_query = filter_query
        self.group_delimiter = group_delimiter
        self.split_select_multiples = split_select_multiples
        self.BINARY_SELECT_MULTIPLES = binary_select_multiples
        self.start = start
        self.end = end
        self.remove_group_name = remove_group_name
        self.xform = XForm.objects.get(id_string=self.id_string,
                                       user__username=self.username)
        self._setup()

    def _setup(self):
        # Load the data dictionary plus the select-multiple and GPS xpaths
        # needed when flattening records.
        self.dd = DataDictionary.objects.get(user__username=self.username,
                                             id_string=self.id_string)
        self.select_multiples = self._collect_select_multiples(self.dd)
        self.gps_fields = self._collect_gps_fields(self.dd)

    @classmethod
    def _fields_to_select(cls, dd):
        """Return abbreviated xpaths of every question in the form."""
        return [c.get_abbreviated_xpath()
                for c in dd.get_survey_elements() if isinstance(c, Question)]

    @classmethod
    def _collect_select_multiples(cls, dd):
        """Map each select-multiple question xpath to its choices' xpaths."""
        return dict([(e.get_abbreviated_xpath(), [c.get_abbreviated_xpath()
                                                  for c in e.children])
                     for e in dd.get_survey_elements()
                     if e.bind.get("type") == "select"])

    @classmethod
    def _split_select_multiples(cls, record, select_multiples,
                                binary_select_multiples=False):
        """ Prefix contains the xpath and slash if we are within a repeat so
        that we can figure out which select multiples belong to which repeat
        """
        for key, choices in select_multiples.items():
            # the select multiple might be blank or not exist in the record,
            # need to make those False
            selections = []
            if key in record:
                # split selected choices by spaces and join by / to the
                # element's xpath
                selections = ["%s/%s" % (key, r)
                              for r in record[key].split(" ")]
                # remove the column since we are adding separate columns
                # for each choice
                record.pop(key)
            if not binary_select_multiples:
                # add columns to record for every choice, with default
                # False and set to True for items in selections
                record.update(dict([(choice, choice in selections)
                                    for choice in choices]))
            else:
                YES = 1
                NO = 0
                record.update(
                    dict([(choice, YES if choice in selections else NO)
                          for choice in choices]))
        # recurse into repeats
        for record_key, record_item in record.items():
            if type(record_item) == list:
                for list_item in record_item:
                    if type(list_item) == dict:
                        cls._split_select_multiples(
                            list_item, select_multiples)
        return record

    @classmethod
    def _collect_gps_fields(cls, dd):
        """Return the xpaths of all geopoint questions in the form."""
        return [e.get_abbreviated_xpath() for e in dd.get_survey_elements()
                if e.bind.get("type") == "geopoint"]

    @classmethod
    def _tag_edit_string(cls, record):
        """
        Turns a list of tags into a string representation.
        """
        if '_tags' in record:
            tags = []
            for tag in record['_tags']:
                if ',' in tag and ' ' in tag:
                    tags.append('"%s"' % tag)
                else:
                    tags.append(tag)
            record.update({'_tags': u', '.join(sorted(tags))})

    @classmethod
    def _split_gps_fields(cls, record, gps_fields):
        """Split each "lat lng alt precision" geopoint string into separate
        columns, recursing into repeat groups."""
        updated_gps_fields = {}
        for key, value in record.iteritems():
            if key in gps_fields and isinstance(value, basestring):
                gps_xpaths = DataDictionary.get_additional_geopoint_xpaths(key)
                gps_parts = dict([(xpath, None) for xpath in gps_xpaths])
                # hack, check if its a list and grab the object within that
                parts = value.split(' ')
                # TODO: check whether or not we can have a gps recording
                # from ODKCollect that has less than four components,
                # for now we are assuming that this is not the case.
                if len(parts) == 4:
                    gps_parts = dict(zip(gps_xpaths, parts))
                updated_gps_fields.update(gps_parts)
            # check for repeats within record i.e. in value
            elif type(value) == list:
                for list_item in value:
                    if type(list_item) == dict:
                        cls._split_gps_fields(list_item, gps_fields)
        record.update(updated_gps_fields)

    def _query_data(self, query='{}', start=0,
                    limit=ParsedInstance.DEFAULT_LIMIT,
                    fields='[]', count=False):
        """Query submissions; return the record count when ``count`` is True,
        otherwise a cursor over the matching records.  Raises
        NoRecordsFoundError when nothing matches."""
        # ParsedInstance.query_mongo takes params as json strings
        # so we dumps the fields dictionary
        count_args = {
            'xform': self.xform,
            'query': query,
            'start': self.start,
            'end': self.end,
            'fields': '[]',
            'sort': '{}',
            'count': True
        }
        count_object = list(ParsedInstance.query_data(**count_args))
        record_count = count_object[0]["count"]
        if record_count < 1:
            raise NoRecordsFoundError("No records found for your query")
        # if count was requested, return the count
        if count:
            return record_count
        else:
            query_args = {
                'xform': self.xform,
                'query': query,
                'fields': fields,
                'start': self.start,
                'end': self.end,
                # TODO: we might want to add this in for the user
                # to specify a sort order
                'sort': 'id',
                'start_index': start,
                'limit': limit,
                'count': False
            }
            # use ParsedInstance.query_mongo
            cursor = ParsedInstance.query_data(**query_args)
            return cursor
class CSVDataFrameBuilder(AbstractDataFrameBuilder):
    """CSV exporter: flattens submission records (splitting select-multiples,
    geopoints and repeats) and writes them via write_to_csv()."""

    def __init__(self, username, id_string, filter_query=None,
                 group_delimiter=DEFAULT_GROUP_DELIMITER,
                 split_select_multiples=True, binary_select_multiples=False,
                 start=None, end=None, remove_group_name=False):
        super(CSVDataFrameBuilder, self).__init__(
            username, id_string, filter_query, group_delimiter,
            split_select_multiples, binary_select_multiples, start, end,
            remove_group_name)
        # Maps question xpath -> None (scalar) or list of generated columns.
        self.ordered_columns = OrderedDict()

    def _setup(self):
        super(CSVDataFrameBuilder, self)._setup()

    @classmethod
    def _reindex(cls, key, value, ordered_columns, parent_prefix=None):
        """
        Flatten list columns by appending an index, otherwise return as is
        """
        d = {}
        # check for lists
        if type(value) is list and len(value) > 0 \
                and key not in [ATTACHMENTS, NOTES]:
            for index, item in enumerate(value):
                # start at 1
                index += 1
                # for each list check for dict, we want to transform the key of
                # this dict
                if type(item) is dict:
                    for nested_key, nested_val in item.iteritems():
                        # given the key "children/details" and nested_key/
                        # abbreviated xpath
                        # "children/details/immunization/polio_1",
                        # generate ["children", index, "immunization/polio_1"]
                        xpaths = [
                            "%s[%s]" % (
                                nested_key[:nested_key.index(key) + len(key)],
                                index),
                            nested_key[nested_key.index(key) + len(key) + 1:]]
                        # re-create xpath the split on /
                        xpaths = "/".join(xpaths).split("/")
                        new_prefix = xpaths[:-1]
                        if type(nested_val) is list:
                            # if nested_value is a list, rinse and repeat
                            d.update(cls._reindex(
                                nested_key, nested_val,
                                ordered_columns, new_prefix))
                        else:
                            # it can only be a scalar
                            # collapse xpath
                            if parent_prefix:
                                xpaths[0:len(parent_prefix)] = parent_prefix
                            new_xpath = u"/".join(xpaths)
                            # check if this key exists in our ordered columns
                            if key in ordered_columns.keys():
                                if new_xpath not in ordered_columns[key]:
                                    ordered_columns[key].append(new_xpath)
                            d[new_xpath] = nested_val
                else:
                    d[key] = value
        else:
            # anything that's not a list will be in the top level dict so its
            # safe to simply assign
            if key == NOTES:
                d[key] = u"\r\n".join(value)
            elif key == ATTACHMENTS:
                d[key] = []
            else:
                d[key] = value
        return d

    @classmethod
    def _build_ordered_columns(cls, survey_element, ordered_columns,
                               is_repeating_section=False):
        """
        Build a flat ordered dict of column groups

        is_repeating_section ensures that child questions of repeating sections
        are not considered columns
        """
        for child in survey_element.children:
            # child_xpath = child.get_abbreviated_xpath()
            if isinstance(child, Section):
                child_is_repeating = False
                if isinstance(child, RepeatingSection):
                    ordered_columns[child.get_abbreviated_xpath()] = []
                    child_is_repeating = True
                cls._build_ordered_columns(child, ordered_columns,
                                           child_is_repeating)
            elif isinstance(child, Question) and not \
                    question_types_to_exclude(child.type) and not\
                    is_repeating_section:  # if is_repeating_section,
                # its parent already initialised an empty list
                # so we dont add it to our list of columns,
                # the repeating columns list will be
                # generated when we reindex
                ordered_columns[child.get_abbreviated_xpath()] = None

    def _format_for_dataframe(self, cursor):
        """Turn raw records from ``cursor`` into flat dicts ready for CSV."""
        # TODO: check for and handle empty results
        # add ordered columns for select multiples
        if self.split_select_multiples:
            for key, choices in self.select_multiples.items():
                # HACK to ensure choices are NOT duplicated
                self.ordered_columns[key] = \
                    remove_dups_from_list_maintain_order(choices)
        # add ordered columns for gps fields
        for key in self.gps_fields:
            gps_xpaths = self.dd.get_additional_geopoint_xpaths(key)
            self.ordered_columns[key] = [key] + gps_xpaths
        data = []
        for record in cursor:
            # split select multiples
            if self.split_select_multiples:
                record = self._split_select_multiples(
                    record, self.select_multiples,
                    self.BINARY_SELECT_MULTIPLES)
            # check for gps and split into components i.e. latitude, longitude,
            # altitude, precision
            self._split_gps_fields(record, self.gps_fields)
            self._tag_edit_string(record)
            flat_dict = {}
            # re index repeats
            for key, value in record.iteritems():
                reindexed = self._reindex(key, value, self.ordered_columns)
                flat_dict.update(reindexed)
            # if delimiter is different, replace within record as well
            if self.group_delimiter != DEFAULT_GROUP_DELIMITER:
                flat_dict = dict((self.group_delimiter.join(k.split('/')), v)
                                 for k, v in flat_dict.iteritems())
            data.append(flat_dict)
        return data

    def export_to(self, path):
        """Run the query and write the full CSV export to ``path``."""
        self.ordered_columns = OrderedDict()
        self._build_ordered_columns(self.dd.survey, self.ordered_columns)
        cursor = self._query_data(
            self.filter_query)
        data = self._format_for_dataframe(cursor)
        columns = list(chain.from_iterable(
            [[xpath] if cols is None else cols
             for xpath, cols in self.ordered_columns.iteritems()]))
        # use a different group delimiter if needed
        if self.group_delimiter != DEFAULT_GROUP_DELIMITER:
            columns = [self.group_delimiter.join(col.split("/"))
                       for col in columns]
        # add extra columns
        columns += [col for col in self.ADDITIONAL_COLUMNS]
        write_to_csv(path, data, columns,
                     remove_group_name=self.remove_group_name)
remove_group_name=self.remove_group_name) | unknown | codeparrot/codeparrot-clean | ||
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2008-2009 Zuza Software Foundation
#
# This file is part of Pootle.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <http://www.gnu.org/licenses/>.
from django.shortcuts import get_object_or_404
from django.utils.translation import ugettext as _
from django.core.exceptions import PermissionDenied
from pootle_misc.baseurl import redirect
from pootle_translationproject.models import TranslationProject
from pootle_store.models import Store, Unit
from pootle_store.views import translate_page
from pootle_profile.models import get_profile
from pootle_app.views.language import dispatch
from pootle_app.models.permissions import get_matching_permissions, check_permission
def get_stats_headings():
    """Return a dictionary of localised headings for statistics tables."""
    headings = {
        "name": _("Name"),
        "translated": _("Translated"),
        "translatedpercentage": _("Translated percentage"),
        "translatedwords": _("Translated words"),
        "fuzzy": _("Fuzzy"),
        "fuzzypercentage": _("Fuzzy percentage"),
        "fuzzywords": _("Fuzzy words"),
        "untranslated": _("Untranslated"),
        "untranslatedpercentage": _("Untranslated percentage"),
        "untranslatedwords": _("Untranslated words"),
        "total": _("Total"),
        "totalwords": _("Total Words"),
        # l10n: noun. The graphical representation of translation status
        "progress": _("Progress"),
        "summary": _("Summary"),
    }
    return headings
def get_translation_project(f):
    """Decorator: resolve (language_code, project_code) URL parts into a
    TranslationProject (404 when missing) and pass it to the view."""
    def decorated_f(request, language_code, project_code, *args, **kwargs):
        tp = get_object_or_404(TranslationProject,
                               language__code=language_code,
                               project__code=project_code)
        return f(request, tp, *args, **kwargs)
    return decorated_f
def set_request_context(f):
    """Decorator: attach the translation project and the user's permissions
    to the request before calling the view."""
    def decorated_f(request, translation_project, *args, **kwargs):
        # For now, all permissions in a translation project are
        # relative to the root of that translation project.
        profile = get_profile(request.user)
        request.permissions = get_matching_permissions(
            profile, translation_project.directory)
        request.translation_project = translation_project
        return f(request, translation_project, *args, **kwargs)
    return decorated_f
################################################################################
@get_translation_project
@set_request_context
def translate(request, translation_project, dir_path=None):
    """Render the translate page for a whole project, or for one of its
    subdirectories when ``dir_path`` is given."""
    if dir_path:
        prefix = translation_project.pootle_path + dir_path
        units_query = Unit.objects.filter(
            store__pootle_path__startswith=prefix)
    else:
        units_query = Unit.objects.filter(
            store__translation_project=translation_project)
    return translate_page(request, units_query)
@get_translation_project
@set_request_context
def commit_file(request, translation_project, file_path):
    """Commit a single store to version control, then redirect back to the
    project directory listing.

    Raises PermissionDenied when the user lacks the 'commit' right.
    """
    if not check_permission("commit", request):
        raise PermissionDenied(_("You do not have rights to commit files here"))
    pootle_path = translation_project.directory.pootle_path + file_path
    store = get_object_or_404(Store, pootle_path=pootle_path)
    # Called for its side effect; the return value was never used.
    translation_project.commitpofile(request, store)
    return redirect(dispatch.show_directory(
        request, translation_project.directory.pootle_path))
@get_translation_project
@set_request_context
def update_file(request, translation_project, file_path):
if not check_permission("commit", request):
raise PermissionDenied(_("You do not have rights to update files here"))
pootle_path = translation_project.directory.pootle_path + file_path
store = get_object_or_404(Store, pootle_path=pootle_path)
result = translation_project.update_file(request, store)
return redirect(dispatch.show_directory(request, translation_project.directory.pootle_path)) | unknown | codeparrot/codeparrot-clean | ||
"""
===================================================
Label Propagation digits: Demonstrating performance
===================================================
This example demonstrates the power of semisupervised learning by
training a Label Spreading model to classify handwritten digits
with sets of very few labels.
The handwritten digit dataset has 1797 total points. The model will
be trained using all points, but only 30 will be labeled. Results
in the form of a confusion matrix and a series of metrics over each
class will be very good.
At the end, the top 10 most uncertain predictions will be shown.
"""
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
# %%
# Data generation
# ---------------
#
# We use the digits dataset. We only use a subset of randomly selected samples.
import numpy as np
from sklearn import datasets
digits = datasets.load_digits()
rng = np.random.RandomState(2)
indices = np.arange(len(digits.data))
rng.shuffle(indices)
# %%
#
# We selected 340 samples of which only 40 will be associated with a known label.
# Therefore, we store the indices of the 300 other samples for which we are not
# supposed to know their labels.
X = digits.data[indices[:340]]
y = digits.target[indices[:340]]
images = digits.images[indices[:340]]
n_total_samples = len(y)
n_labeled_points = 40
indices = np.arange(n_total_samples)
unlabeled_set = indices[n_labeled_points:]
# %%
# Shuffle everything around
y_train = np.copy(y)
y_train[unlabeled_set] = -1
# %%
# Semi-supervised learning
# ------------------------
#
# We fit a :class:`~sklearn.semi_supervised.LabelSpreading` and use it to predict
# the unknown labels.
from sklearn.metrics import classification_report
from sklearn.semi_supervised import LabelSpreading
lp_model = LabelSpreading(gamma=0.25, max_iter=20)
lp_model.fit(X, y_train)
predicted_labels = lp_model.transduction_[unlabeled_set]
true_labels = y[unlabeled_set]
print(
"Label Spreading model: %d labeled & %d unlabeled points (%d total)"
% (n_labeled_points, n_total_samples - n_labeled_points, n_total_samples)
)
# %%
# Classification report
print(classification_report(true_labels, predicted_labels))
# %%
# Confusion matrix
from sklearn.metrics import ConfusionMatrixDisplay
ConfusionMatrixDisplay.from_predictions(
true_labels, predicted_labels, labels=lp_model.classes_
)
# %%
# Plot the most uncertain predictions
# -----------------------------------
#
# Here, we will pick and show the 10 most uncertain predictions.
from scipy import stats
pred_entropies = stats.distributions.entropy(lp_model.label_distributions_.T)
# %%
# Pick the top 10 most uncertain labels
uncertainty_index = np.argsort(pred_entropies)[-10:]
# %%
# Plot
import matplotlib.pyplot as plt
f = plt.figure(figsize=(7, 5))
for index, image_index in enumerate(uncertainty_index):
image = images[image_index]
sub = f.add_subplot(2, 5, index + 1)
sub.imshow(image, cmap=plt.cm.gray_r)
plt.xticks([])
plt.yticks([])
sub.set_title(
"predict: %i\ntrue: %i" % (lp_model.transduction_[image_index], y[image_index])
)
f.suptitle("Learning with small amount of labeled data")
plt.show() | python | github | https://github.com/scikit-learn/scikit-learn | examples/semi_supervised/plot_label_propagation_digits.py |
import os
import sys
from flask import logging
from app import current_app as app, celery
from app.models import db
from app.models.setting import Environment
from app.settings import set_settings
from populate_db import populate
_basedir = os.path.abspath(os.path.dirname(__file__))
class Setup(object):
@staticmethod
def create_app():
app.config.from_object('config.TestingConfig')
app.config['INTEGRATE_SOCKETIO'] = False
app.secret_key = 'super secret key'
app.logger.addHandler(logging.StreamHandler(sys.stdout))
app.logger.setLevel(logging.ERROR)
celery.conf.update(app.config)
with app.test_request_context():
db.create_all()
populate()
set_settings(secret='super secret key', app_name='Open Event', app_environment=Environment.TESTING)
return app.test_client()
@staticmethod
def drop_db():
with app.test_request_context():
db.session.remove()
if app.config['SQLALCHEMY_DATABASE_URI'].find('postgresql://') > -1:
# drop_all has problems with foreign keys in postgres database (cyclic dependency)
db.engine.execute("drop schema if exists public cascade")
db.engine.execute("create schema public")
else:
# drop all works for SQLite and should work for other DBMS like MySQL, Mongo etc
db.drop_all() | unknown | codeparrot/codeparrot-clean | ||
/*
MIT License http://www.opensource.org/licenses/mit-license.php
*/
"use strict";
const RuntimeGlobals = require("../RuntimeGlobals");
const StartupChunkDependenciesRuntimeModule = require("./StartupChunkDependenciesRuntimeModule");
const StartupEntrypointRuntimeModule = require("./StartupEntrypointRuntimeModule");
/** @typedef {import("../../declarations/WebpackOptions").ChunkLoadingType} ChunkLoadingType */
/** @typedef {import("../Chunk")} Chunk */
/** @typedef {import("../Compiler")} Compiler */
/**
* @typedef {object} Options
* @property {ChunkLoadingType} chunkLoading
* @property {boolean=} asyncChunkLoading
*/
const PLUGIN_NAME = "StartupChunkDependenciesPlugin";
class StartupChunkDependenciesPlugin {
/**
* @param {Options} options options
*/
constructor(options) {
/** @type {ChunkLoadingType} */
this.chunkLoading = options.chunkLoading;
/** @type {boolean} */
this.asyncChunkLoading =
typeof options.asyncChunkLoading === "boolean"
? options.asyncChunkLoading
: true;
}
/**
* Apply the plugin
* @param {Compiler} compiler the compiler instance
* @returns {void}
*/
apply(compiler) {
compiler.hooks.thisCompilation.tap(PLUGIN_NAME, (compilation) => {
const globalChunkLoading = compilation.outputOptions.chunkLoading;
/**
* @param {Chunk} chunk chunk to check
* @returns {boolean} true, when the plugin is enabled for the chunk
*/
const isEnabledForChunk = (chunk) => {
const options = chunk.getEntryOptions();
const chunkLoading =
options && options.chunkLoading !== undefined
? options.chunkLoading
: globalChunkLoading;
return chunkLoading === this.chunkLoading;
};
compilation.hooks.additionalTreeRuntimeRequirements.tap(
PLUGIN_NAME,
(chunk, set, { chunkGraph }) => {
if (!isEnabledForChunk(chunk)) return;
if (chunkGraph.hasChunkEntryDependentChunks(chunk)) {
set.add(RuntimeGlobals.startup);
set.add(RuntimeGlobals.ensureChunk);
set.add(RuntimeGlobals.ensureChunkIncludeEntries);
compilation.addRuntimeModule(
chunk,
new StartupChunkDependenciesRuntimeModule(this.asyncChunkLoading)
);
}
}
);
compilation.hooks.runtimeRequirementInTree
.for(RuntimeGlobals.startupEntrypoint)
.tap(PLUGIN_NAME, (chunk, set) => {
if (!isEnabledForChunk(chunk)) return;
set.add(RuntimeGlobals.require);
set.add(RuntimeGlobals.ensureChunk);
set.add(RuntimeGlobals.ensureChunkIncludeEntries);
compilation.addRuntimeModule(
chunk,
new StartupEntrypointRuntimeModule(this.asyncChunkLoading)
);
});
});
}
}
module.exports = StartupChunkDependenciesPlugin; | javascript | github | https://github.com/webpack/webpack | lib/runtime/StartupChunkDependenciesPlugin.js |
#!/usr/bin/env python
# ----------------------------------------------------------------------------
# pyglet
# Copyright (c) 2006-2008 Alex Holkner
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * Neither the name of pyglet nor the names of its
# contributors may be used to endorse or promote products
# derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# ----------------------------------------------------------------------------
'''Displays a rotating torus using OpenGL.
This example demonstrates:
* Using a 3D projection on a window by overriding the default on_resize
handler
* Enabling multisampling if available
* Drawing a simple 3D primitive using vertex and index arrays
* Using a display list
* Fixed-pipeline lighting
'''
from math import pi, sin, cos
from pyglet.gl import *
import pyglet
try:
# Try and create a window with multisampling (antialiasing)
config = Config(sample_buffers=1, samples=4,
depth_size=16, double_buffer=True,)
window = pyglet.window.Window(resizable=True, config=config)
except pyglet.window.NoSuchConfigException:
# Fall back to no multisampling for old hardware
window = pyglet.window.Window(resizable=True)
@window.event
def on_resize(width, height):
# Override the default on_resize handler to create a 3D projection
glViewport(0, 0, width, height)
glMatrixMode(GL_PROJECTION)
glLoadIdentity()
gluPerspective(60., width / float(height), .1, 1000.)
glMatrixMode(GL_MODELVIEW)
return pyglet.event.EVENT_HANDLED
def update(dt):
global rx, ry, rz
rx += dt * 1
ry += dt * 80
rz += dt * 30
rx %= 360
ry %= 360
rz %= 360
pyglet.clock.schedule(update)
@window.event
def on_draw():
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)
glLoadIdentity()
glTranslatef(0, 0, -4)
glRotatef(rz, 0, 0, 1)
glRotatef(ry, 0, 1, 0)
glRotatef(rx, 1, 0, 0)
torus.draw()
def setup():
# One-time GL setup
glClearColor(1, 1, 1, 1)
glColor3f(1, 0, 0)
glEnable(GL_DEPTH_TEST)
glEnable(GL_CULL_FACE)
# Uncomment this line for a wireframe view
#glPolygonMode(GL_FRONT_AND_BACK, GL_LINE)
# Simple light setup. On Windows GL_LIGHT0 is enabled by default,
# but this is not the case on Linux or Mac, so remember to always
# include it.
glEnable(GL_LIGHTING)
glEnable(GL_LIGHT0)
glEnable(GL_LIGHT1)
# Define a simple function to create ctypes arrays of floats:
def vec(*args):
return (GLfloat * len(args))(*args)
glLightfv(GL_LIGHT0, GL_POSITION, vec(.5, .5, 1, 0))
glLightfv(GL_LIGHT0, GL_SPECULAR, vec(.5, .5, 1, 1))
glLightfv(GL_LIGHT0, GL_DIFFUSE, vec(1, 1, 1, 1))
glLightfv(GL_LIGHT1, GL_POSITION, vec(1, 0, .5, 0))
glLightfv(GL_LIGHT1, GL_DIFFUSE, vec(.5, .5, .5, 1))
glLightfv(GL_LIGHT1, GL_SPECULAR, vec(1, 1, 1, 1))
glMaterialfv(GL_FRONT_AND_BACK, GL_AMBIENT_AND_DIFFUSE, vec(0.5, 0, 0.3, 1))
glMaterialfv(GL_FRONT_AND_BACK, GL_SPECULAR, vec(1, 1, 1, 1))
glMaterialf(GL_FRONT_AND_BACK, GL_SHININESS, 50)
class Torus(object):
def __init__(self, radius, inner_radius, slices, inner_slices):
# Create the vertex and normal arrays.
vertices = []
normals = []
u_step = 2 * pi / (slices - 1)
v_step = 2 * pi / (inner_slices - 1)
u = 0.
for i in range(slices):
cos_u = cos(u)
sin_u = sin(u)
v = 0.
for j in range(inner_slices):
cos_v = cos(v)
sin_v = sin(v)
d = (radius + inner_radius * cos_v)
x = d * cos_u
y = d * sin_u
z = inner_radius * sin_v
nx = cos_u * cos_v
ny = sin_u * cos_v
nz = sin_v
vertices.extend([x, y, z])
normals.extend([nx, ny, nz])
v += v_step
u += u_step
# Create ctypes arrays of the lists
vertices = (GLfloat * len(vertices))(*vertices)
normals = (GLfloat * len(normals))(*normals)
# Create a list of triangle indices.
indices = []
for i in range(slices - 1):
for j in range(inner_slices - 1):
p = i * inner_slices + j
indices.extend([p, p + inner_slices, p + inner_slices + 1])
indices.extend([p, p + inner_slices + 1, p + 1])
indices = (GLuint * len(indices))(*indices)
# Compile a display list
self.list = glGenLists(1)
glNewList(self.list, GL_COMPILE)
glPushClientAttrib(GL_CLIENT_VERTEX_ARRAY_BIT)
glEnableClientState(GL_VERTEX_ARRAY)
glEnableClientState(GL_NORMAL_ARRAY)
glVertexPointer(3, GL_FLOAT, 0, vertices)
glNormalPointer(GL_FLOAT, 0, normals)
glDrawElements(GL_TRIANGLES, len(indices), GL_UNSIGNED_INT, indices)
glPopClientAttrib()
glEndList()
def draw(self):
glCallList(self.list)
setup()
torus = Torus(1, 0.3, 50, 30)
rx = ry = rz = 0
pyglet.app.run() | unknown | codeparrot/codeparrot-clean | ||
########################################################################
#
# File Name: HTMLModElement
#
#
### This file is automatically generated by GenerateHtml.py.
### DO NOT EDIT!
"""
WWW: http://4suite.com/4DOM e-mail: support@4suite.com
Copyright (c) 2000 Fourthought Inc, USA. All Rights Reserved.
See http://4suite.com/COPYRIGHT for license and copyright information
"""
import string
from xml.dom import Node
from xml.dom.html.HTMLElement import HTMLElement
class HTMLModElement(HTMLElement):
def __init__(self, ownerDocument, nodeName="MOD"):
HTMLElement.__init__(self, ownerDocument, nodeName)
### Attribute Methods ###
def _get_cite(self):
return self.getAttribute("CITE")
def _set_cite(self, value):
self.setAttribute("CITE", value)
def _get_dateTime(self):
return self.getAttribute("DATETIME")
def _set_dateTime(self, value):
self.setAttribute("DATETIME", value)
### Attribute Access Mappings ###
_readComputedAttrs = HTMLElement._readComputedAttrs.copy()
_readComputedAttrs.update({
"cite" : _get_cite,
"dateTime" : _get_dateTime
})
_writeComputedAttrs = HTMLElement._writeComputedAttrs.copy()
_writeComputedAttrs.update({
"cite" : _set_cite,
"dateTime" : _set_dateTime
})
_readOnlyAttrs = filter(lambda k,m=_writeComputedAttrs: not m.has_key(k),
HTMLElement._readOnlyAttrs + _readComputedAttrs.keys()) | unknown | codeparrot/codeparrot-clean | ||
"""Newton-CG trust-region optimization."""
from __future__ import division, print_function, absolute_import
import math
import numpy as np
import scipy.linalg
from ._trustregion import (_minimize_trust_region, BaseQuadraticSubproblem)
__all__ = []
def _minimize_trust_ncg(fun, x0, args=(), jac=None, hess=None, hessp=None,
**trust_region_options):
"""
Minimization of scalar function of one or more variables using
the Newton conjugate gradient trust-region algorithm.
Options
-------
initial_trust_radius : float
Initial trust-region radius.
max_trust_radius : float
Maximum value of the trust-region radius. No steps that are longer
than this value will be proposed.
eta : float
Trust region related acceptance stringency for proposed steps.
gtol : float
Gradient norm must be less than `gtol` before successful
termination.
"""
if jac is None:
raise ValueError('Jacobian is required for Newton-CG trust-region '
'minimization')
if hess is None and hessp is None:
raise ValueError('Either the Hessian or the Hessian-vector product '
'is required for Newton-CG trust-region minimization')
return _minimize_trust_region(fun, x0, args=args, jac=jac, hess=hess,
hessp=hessp, subproblem=CGSteihaugSubproblem,
**trust_region_options)
class CGSteihaugSubproblem(BaseQuadraticSubproblem):
"""Quadratic subproblem solved by a conjugate gradient method"""
def solve(self, trust_radius):
"""
Solve the subproblem using a conjugate gradient method.
Parameters
----------
trust_radius : float
We are allowed to wander only this far away from the origin.
Returns
-------
p : ndarray
The proposed step.
hits_boundary : bool
True if the proposed step is on the boundary of the trust region.
Notes
-----
This is algorithm (7.2) of Nocedal and Wright 2nd edition.
Only the function that computes the Hessian-vector product is required.
The Hessian itself is not required, and the Hessian does
not need to be positive semidefinite.
"""
# get the norm of jacobian and define the origin
p_origin = np.zeros_like(self.jac)
# define a default tolerance
tolerance = min(0.5, math.sqrt(self.jac_mag)) * self.jac_mag
# Stop the method if the search direction
# is a direction of nonpositive curvature.
if self.jac_mag < tolerance:
hits_boundary = False
return p_origin, hits_boundary
# init the state for the first iteration
z = p_origin
r = self.jac
d = -r
# Search for the min of the approximation of the objective function.
while True:
# do an iteration
Bd = self.hessp(d)
dBd = np.dot(d, Bd)
if dBd <= 0:
# Look at the two boundary points.
# Find both values of t to get the boundary points such that
# ||z + t d|| == trust_radius
# and then choose the one with the predicted min value.
ta, tb = self.get_boundaries_intersections(z, d, trust_radius)
pa = z + ta * d
pb = z + tb * d
if self(pa) < self(pb):
p_boundary = pa
else:
p_boundary = pb
hits_boundary = True
return p_boundary, hits_boundary
r_squared = np.dot(r, r)
alpha = r_squared / dBd
z_next = z + alpha * d
if scipy.linalg.norm(z_next) >= trust_radius:
# Find t >= 0 to get the boundary point such that
# ||z + t d|| == trust_radius
ta, tb = self.get_boundaries_intersections(z, d, trust_radius)
p_boundary = z + tb * d
hits_boundary = True
return p_boundary, hits_boundary
r_next = r + alpha * Bd
r_next_squared = np.dot(r_next, r_next)
if math.sqrt(r_next_squared) < tolerance:
hits_boundary = False
return z_next, hits_boundary
beta_next = r_next_squared / r_squared
d_next = -r_next + beta_next * d
# update the state for the next iteration
z = z_next
r = r_next
d = d_next | unknown | codeparrot/codeparrot-clean | ||
# Copyright 2018 Google Inc.
#
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from _hardware import Expectation
from _hardware_android import HardwareAndroid
CPU_CLOCK_RATE = 2035200
MEM_CLOCK_RATE = 13763
GPU_CLOCK_RATE = 670000000
GPU_POWER_LEVEL = 1 # lower is faster, minimum is 0
class HardwarePixel2(HardwareAndroid):
def __init__(self, adb):
HardwareAndroid.__init__(self, adb)
def __enter__(self):
HardwareAndroid.__enter__(self)
if not self._adb.is_root():
return self
self._adb.shell('\n'.join([
'''
stop thermal-engine
stop perfd''',
# turn off the slow cores and one fast core
'''
for N in 0 1 2 3 7; do
echo 0 > /sys/devices/system/cpu/cpu$N/online
done''',
# lock 3 fast cores: two for Skia and one for the OS
'''
for N in 4 5 6; do
echo 1 > /sys/devices/system/cpu/cpu$N/online
echo userspace > /sys/devices/system/cpu/cpu$N/cpufreq/scaling_governor
echo %i > /sys/devices/system/cpu/cpu$N/cpufreq/scaling_max_freq
echo %i > /sys/devices/system/cpu/cpu$N/cpufreq/scaling_min_freq
echo %i > /sys/devices/system/cpu/cpu$N/cpufreq/scaling_setspeed
done''' % tuple(CPU_CLOCK_RATE for _ in range(3)),
# Set GPU bus and idle timer
'''
echo 0 > /sys/class/kgsl/kgsl-3d0/bus_split''',
# csmartdalton, 4-26-2018: this line hangs my device
# echo 1 > /sys/class/kgsl/kgsl-3d0/force_clk_on
'''
echo 10000 > /sys/class/kgsl/kgsl-3d0/idle_timer''',
# Set mem frequency to max
'''
echo %i > /sys/class/devfreq/soc\:qcom,gpubw/min_freq
echo %i > /sys/class/devfreq/soc\:qcom,gpubw/max_freq
echo %i > /sys/class/devfreq/soc\:qcom,cpubw/min_freq
echo %i > /sys/class/devfreq/soc\:qcom,cpubw/max_freq
echo %i > /sys/class/devfreq/soc\:qcom,mincpubw/min_freq
echo %i > /sys/class/devfreq/soc\:qcom,mincpubw/max_freq
echo %i > /sys/class/devfreq/soc\:qcom,memlat-cpu0/min_freq
echo %i > /sys/class/devfreq/soc\:qcom,memlat-cpu0/max_freq''' %
tuple(MEM_CLOCK_RATE for _ in range(8)),
# Set GPU to performance mode
'''
echo performance > /sys/class/kgsl/kgsl-3d0/devfreq/governor
echo %i > /sys/class/kgsl/kgsl-3d0/devfreq/max_freq
echo %i > /sys/class/kgsl/kgsl-3d0/devfreq/min_freq''' %
tuple(GPU_CLOCK_RATE for _ in range(2)),
# Set GPU power level
'''
echo %i > /sys/class/kgsl/kgsl-3d0/max_pwrlevel
echo %i > /sys/class/kgsl/kgsl-3d0/min_pwrlevel''' %
tuple(GPU_POWER_LEVEL for _ in range(2))]))
assert('msm_therm' == self._adb.check(\
'cat /sys/class/thermal/thermal_zone10/type').strip())
assert('pm8998_tz' == self._adb.check(\
'cat /sys/class/thermal/thermal_zone7/type').strip())
return self
def sanity_check(self):
HardwareAndroid.sanity_check(self)
if not self._adb.is_root():
return
result = self._adb.check(' '.join(
['cat',
'/sys/class/power_supply/battery/capacity',
'/sys/devices/system/cpu/online'] + \
['/sys/devices/system/cpu/cpu%i/cpufreq/scaling_cur_freq' % i
for i in range(4, 7)] + \
# Unfortunately we can't monitor the gpu clock:
#
# /sys/class/kgsl/kgsl-3d0/devfreq/cur_freq
#
# It doesn't respect the min_freq/max_freq values when not under load.
['/sys/kernel/debug/clk/bimc_clk/measure',
'/sys/class/kgsl/kgsl-3d0/temp',
'/sys/class/kgsl/kgsl-3d0/throttling',
'/sys/class/thermal/thermal_zone10/temp',
'/sys/class/thermal/thermal_zone7/temp']))
expectations = \
[Expectation(int, min_value=30, name='battery', sleeptime=30*60),
Expectation(str, exact_value='4-6', name='online cpus')] + \
[Expectation(int, exact_value=CPU_CLOCK_RATE, name='cpu_%i clock rate' %i)
for i in range(4, 7)] + \
[Expectation(long, min_value=902390000, max_value=902409999,
name='measured ddr clock', sleeptime=10),
Expectation(int, max_value=750, name='gpu temperature'),
Expectation(int, exact_value=1, name='gpu throttling'),
Expectation(int, max_value=75, name='msm_therm temperature'),
Expectation(int, max_value=75000, name='pm8998_tz temperature')]
Expectation.check_all(expectations, result.splitlines()) | unknown | codeparrot/codeparrot-clean | ||
#!/usr/bin/env python
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import logging
import time
import unittest
import urllib
import urllib2
import re
import MySQLdb
import environment
import utils
import tablet
from mysql_flavor import mysql_flavor
from protocols_flavor import protocols_flavor
from vtproto import topodata_pb2
# The two tablets used throughout this module; the UID doubles as the
# port-offset key used by the test environment.
tablet_62344 = tablet.Tablet(62344)
tablet_62044 = tablet.Tablet(62044)
# regexp to check if the tablet status page reports healthy,
# regardless of actual replication lag
healthy_expr = re.compile(r'Current status: <span.+?>healthy')
def setUpModule():
  """Module-level setup: topo server, vtctld, and external mysqld procs.

  On any failure, tears down whatever partially came up before re-raising.
  """
  try:
    if environment.topo_server().flavor() == 'zk2':
      # This is a one-off test to make sure our 'zk2' implementation
      # behaves with a server that is not DNS-resolveable.
      environment.topo_server().setup(add_bad_host=True)
    else:
      environment.topo_server().setup()

    # Start the mysqld instances external to the test.
    mysql_procs = [t.init_mysql() for t in (tablet_62344, tablet_62044)]
    utils.Vtctld().start()
    utils.wait_procs(mysql_procs)
  except:
    # Clean up anything that did come up, then propagate the error.
    tearDownModule()
    raise
def tearDownModule():
  """Module-level cleanup; honors the skip_teardown debugging option."""
  utils.required_teardown()
  if utils.options.skip_teardown:
    return

  all_tablets = (tablet_62344, tablet_62044)
  # Stop both mysqld instances in parallel; ignore individual failures so
  # the rest of the cleanup still runs.
  utils.wait_procs([t.teardown_mysql() for t in all_tablets],
                   raise_on_error=False)
  environment.topo_server().teardown()
  utils.kill_sub_processes()
  utils.remove_tmp_files()
  for t in all_tablets:
    t.remove_tree()
class TestTabletManager(unittest.TestCase):
def tearDown(self):
tablet.Tablet.check_vttablet_count()
environment.topo_server().wipe()
for t in [tablet_62344, tablet_62044]:
t.reset_replication()
t.set_semi_sync_enabled(master=False)
t.clean_dbs()
# run twice to check behavior with existing znode data
def test_sanity(self):
self._test_sanity()
self._test_sanity()
  def _test_sanity(self):
    """Bring up a single master vttablet, run queries and basic vtctl actions.

    Must be runnable twice in a row: the second run exercises the code paths
    that hit already-existing topology data.
    """
    # Start up a master mysql and vttablet
    utils.run_vtctl(['CreateKeyspace', '-force', 'test_keyspace'])
    utils.run_vtctl(['createshard', '-force', 'test_keyspace/0'])
    tablet_62344.init_tablet('master', 'test_keyspace', '0', parent=False)
    utils.run_vtctl(['RebuildKeyspaceGraph', 'test_keyspace'])
    utils.validate_topology()

    # if these statements don't run before the tablet it will wedge
    # waiting for the db to become accessible. this is more a bug than
    # a feature.
    tablet_62344.populate('vt_test_keyspace', self._create_vt_select_test,
                          self._populate_vt_select_test)

    tablet_62344.start_vttablet()

    # make sure the query service is started right away.
    qr = tablet_62344.execute('select id, msg from vt_select_test')
    self.assertEqual(len(qr['rows']), 4,
                     'expected 4 rows in vt_select_test: %s' % str(qr))
    self.assertEqual(qr['fields'][0]['name'], 'id')
    self.assertEqual(qr['fields'][1]['name'], 'msg')

    # test exclude_field_names to vttablet works as expected.
    # With TYPE_ONLY, the result still has field entries but no 'name' key.
    qr = tablet_62344.execute('select id, msg from vt_select_test',
                              execute_options='included_fields:TYPE_ONLY ')
    self.assertEqual(len(qr['rows']), 4,
                     'expected 4 rows in vt_select_test: %s' % str(qr))
    self.assertNotIn('name', qr['fields'][0])
    self.assertNotIn('name', qr['fields'][1])

    # make sure direct dba queries work
    query_result = utils.run_vtctl_json(
        ['ExecuteFetchAsDba', '-json', tablet_62344.tablet_alias,
         'select * from vt_test_keyspace.vt_select_test'])
    self.assertEqual(
        len(query_result['rows']), 4,
        'expected 4 rows in vt_select_test: %s' % str(query_result))
    self.assertEqual(
        len(query_result['fields']), 2,
        'expected 2 fields in vt_select_test: %s' % str(query_result))

    # check Ping / RefreshState / RefreshStateByShard
    utils.run_vtctl(['Ping', tablet_62344.tablet_alias])
    utils.run_vtctl(['RefreshState', tablet_62344.tablet_alias])
    utils.run_vtctl(['RefreshStateByShard', 'test_keyspace/0'])
    utils.run_vtctl(['RefreshStateByShard', '--cells=test_nj',
                     'test_keyspace/0'])

    # Quickly check basic actions.
    utils.run_vtctl(['SetReadOnly', tablet_62344.tablet_alias])
    utils.wait_db_read_only(62344)

    utils.run_vtctl(['SetReadWrite', tablet_62344.tablet_alias])
    utils.check_db_read_write(62344)

    utils.validate_topology()
    utils.run_vtctl(['ValidateKeyspace', 'test_keyspace'])
    # not pinging tablets, as it enables replication checks, and they
    # break because we only have a single master, no slaves
    utils.run_vtctl(['ValidateShard', '-ping-tablets=false',
                     'test_keyspace/0'])

    tablet_62344.kill_vttablet()
_create_vt_select_test = '''create table vt_select_test (
id bigint auto_increment,
msg varchar(64),
primary key (id)
) Engine=InnoDB'''
_populate_vt_select_test = [
"insert into vt_select_test (msg) values ('test %s')" % x
for x in xrange(4)]
  def test_actions_and_timeouts(self):
    """Check that a busy tablet makes a concurrent vtctl action time out.

    A background 'Sleep 10s' hook occupies the tablet's action loop, so a
    RefreshState with a 3s wait-time must fail with the RPC timeout message.
    """
    # Start up a master mysql and vttablet
    utils.run_vtctl(['CreateKeyspace', 'test_keyspace'])
    tablet_62344.init_tablet('master', 'test_keyspace', '0')
    utils.validate_topology()
    tablet_62344.create_db('vt_test_keyspace')
    tablet_62344.start_vttablet()

    utils.run_vtctl(['Ping', tablet_62344.tablet_alias])

    # schedule long action in the background, sleep a little bit to make sure
    # it started to run
    args = (environment.binary_args('vtctl') +
            environment.topo_server().flags() +
            ['-tablet_manager_protocol',
             protocols_flavor().tablet_manager_protocol(),
             '-tablet_protocol', protocols_flavor().tabletconn_protocol(),
             '-log_dir', environment.vtlogroot,
             'Sleep', tablet_62344.tablet_alias, '10s'])
    bg = utils.run_bg(args)
    time.sleep(3)

    # try a frontend RefreshState that should timeout as the tablet is busy
    # running the other one
    _, stderr = utils.run_vtctl(
        ['-wait-time', '3s', 'RefreshState', tablet_62344.tablet_alias],
        expect_fail=True)
    self.assertIn(protocols_flavor().rpc_timeout_message(), stderr)

    # wait for the background vtctl
    bg.wait()

    tablet_62344.kill_vttablet()
def _run_hook(self, params, expected_status, expected_stdout,
expected_stderr):
hr = utils.run_vtctl_json(['ExecuteHook', tablet_62344.tablet_alias] +
params)
self.assertEqual(hr['ExitStatus'], expected_status)
if isinstance(expected_stdout, basestring):
self.assertEqual(hr['Stdout'], expected_stdout)
else:
found = False
for exp in expected_stdout:
if hr['Stdout'] == exp:
found = True
break
if not found:
self.assertFail(
'cannot find expected %s in %s' %
(str(expected_stdout), hr['Stdout']))
if expected_stderr[-1:] == '%':
self.assertEqual(
hr['Stderr'][:len(expected_stderr)-1],
expected_stderr[:len(expected_stderr)-1])
else:
self.assertEqual(hr['Stderr'], expected_stderr)
  def test_hook(self):
    """Exercise ExecuteHook: success, stderr output, failures, bad names."""
    utils.run_vtctl(['CreateKeyspace', 'test_keyspace'])

    # create the database so vttablets start, as it is serving
    tablet_62344.create_db('vt_test_keyspace')
    tablet_62344.init_tablet('master', 'test_keyspace', '0', start=True)

    # test a regular program works
    # Two alternatives are accepted because the hook may echo its
    # parameters in either order.
    self._run_hook(['test.sh', '--flag1', '--param1=hello'], 0,
                   ['TABLET_ALIAS: test_nj-0000062344\n'
                    'PARAM: --flag1\n'
                    'PARAM: --param1=hello\n',
                    'TABLET_ALIAS: test_nj-0000062344\n'
                    'PARAM: --param1=hello\n'
                    'PARAM: --flag1\n'],
                   '')

    # test stderr output
    self._run_hook(['test.sh', '--to-stderr'], 0,
                   'TABLET_ALIAS: test_nj-0000062344\n'
                   'PARAM: --to-stderr\n',
                   'ERR: --to-stderr\n')

    # test commands that fail
    self._run_hook(['test.sh', '--exit-error'], 1,
                   'TABLET_ALIAS: test_nj-0000062344\n'
                   'PARAM: --exit-error\n',
                   'ERROR: exit status 1\n')

    # test hook that is not present
    self._run_hook(['not_here.sh'], -1,
                   '',
                   'missing hook /%')  # cannot go further, local path

    # test hook with invalid name
    _, err = utils.run_vtctl(['--alsologtostderr', 'ExecuteHook',
                              tablet_62344.tablet_alias,
                              '/bin/ls'],
                             mode=utils.VTCTL_VTCTL, trap_output=True,
                             raise_on_error=False)
    expected = "action failed: ExecuteHook hook name cannot have a '/' in it"
    self.assertIn(expected, err)

    tablet_62344.kill_vttablet()
def test_shard_replication_fix(self):
utils.run_vtctl(['CreateKeyspace', 'test_keyspace'])
tablet_62344.create_db('vt_test_keyspace')
tablet_62044.create_db('vt_test_keyspace')
# one master one replica
tablet_62344.init_tablet('master', 'test_keyspace', '0')
tablet_62044.init_tablet('replica', 'test_keyspace', '0')
# make sure the replica is in the replication graph
before_bogus = utils.run_vtctl_json(['GetShardReplication', 'test_nj',
'test_keyspace/0'])
self.assertEqual(2, len(before_bogus['nodes']),
'wrong shard replication nodes before: %s' %
str(before_bogus))
# manually add a bogus entry to the replication graph, and check
# it is removed by ShardReplicationFix
utils.run_vtctl(['ShardReplicationAdd', 'test_keyspace/0',
'test_nj-0000066666'], auto_log=True)
with_bogus = utils.run_vtctl_json(['GetShardReplication', 'test_nj',
'test_keyspace/0'])
self.assertEqual(3, len(with_bogus['nodes']),
'wrong shard replication nodes with bogus: %s' %
str(with_bogus))
utils.run_vtctl(['ShardReplicationFix', 'test_nj', 'test_keyspace/0'],
auto_log=True)
after_fix = utils.run_vtctl_json(['GetShardReplication', 'test_nj',
'test_keyspace/0'])
self.assertEqual(2, len(after_fix['nodes']),
'wrong shard replication nodes after fix: %s' %
str(after_fix))
def check_healthz(self, t, expected):
if expected:
self.assertEqual('ok\n', t.get_healthz())
else:
with self.assertRaises(urllib2.HTTPError):
t.get_healthz()
  def test_health_check(self):
    """End-to-end exercise of the vttablet health-check state machine.

    Starts a master and an initially-uninitialized replica, then checks:
    the replica becomes SERVING after InitShardMaster, the replication
    reporter repairs stopped replication, stopping replication briefly
    does not flip health, and VtTabletStreamHealth reports sane stats
    (lag, target, QPS) throughout.
    """
    # one master, one replica that starts not initialized
    # (for the replica, we let vttablet do the InitTablet)
    tablet_62344.init_tablet('replica', 'test_keyspace', '0')
    for t in tablet_62344, tablet_62044:
      t.create_db('vt_test_keyspace')
    tablet_62344.start_vttablet(wait_for_state=None)
    tablet_62044.start_vttablet(wait_for_state=None,
                                lameduck_period='5s',
                                init_tablet_type='replica',
                                init_keyspace='test_keyspace',
                                init_shard='0')
    # Neither tablet is replicating yet, so both start NOT_SERVING.
    tablet_62344.wait_for_vttablet_state('NOT_SERVING')
    tablet_62044.wait_for_vttablet_state('NOT_SERVING')
    self.check_healthz(tablet_62044, False)
    utils.run_vtctl(['InitShardMaster', '-force', 'test_keyspace/0',
                     tablet_62344.tablet_alias])
    # make sure the unhealthy slave goes to healthy
    tablet_62044.wait_for_vttablet_state('SERVING')
    utils.run_vtctl(['RunHealthCheck', tablet_62044.tablet_alias])
    self.check_healthz(tablet_62044, True)
    # make sure the master is still master
    ti = utils.run_vtctl_json(['GetTablet', tablet_62344.tablet_alias])
    self.assertEqual(ti['type'], topodata_pb2.MASTER,
                     'unexpected master type: %s' % ti['type'])
    # stop replication at the mysql level.
    tablet_62044.mquery('', 'stop slave')
    # vttablet replication_reporter should restart it.
    utils.run_vtctl(['RunHealthCheck', tablet_62044.tablet_alias])
    # insert something on the master and wait for it on the slave.
    tablet_62344.mquery('vt_test_keyspace', [
        'create table repl_test_table (id int)',
        'insert into repl_test_table values (123)'], write=True)
    timeout = 10.0
    while True:
      try:
        result = tablet_62044.mquery('vt_test_keyspace',
                                     'select * from repl_test_table')
        if result:
          # Row arrived: replication was indeed repaired.
          self.assertEqual(result[0][0], 123L)
          break
      except MySQLdb.ProgrammingError:
        # Maybe the create table hasn't gone trough yet, we wait more
        logging.exception('got this exception waiting for data, ignoring it')
      timeout = utils.wait_step(
          'slave replication repaired by replication_reporter', timeout)
    # stop replication, make sure we don't go unhealthy.
    # (we have a baseline as well, so the time should be good).
    utils.run_vtctl(['StopSlave', tablet_62044.tablet_alias])
    utils.run_vtctl(['RunHealthCheck', tablet_62044.tablet_alias])
    self.check_healthz(tablet_62044, True)
    # make sure status web page is healthy
    self.assertRegexpMatches(tablet_62044.get_status(), healthy_expr)
    # make sure the health stream is updated
    health = utils.run_vtctl_json(['VtTabletStreamHealth',
                                   '-count', '1',
                                   tablet_62044.tablet_alias])
    # A zero lag is omitted from the proto-to-JSON output, hence the
    # "not in or < 30" double condition.
    self.assertTrue(('seconds_behind_master' not in health['realtime_stats']) or
                    (health['realtime_stats']['seconds_behind_master'] < 30),
                    'got unexpected health: %s' % str(health))
    self.assertIn('serving', health)
    # then restart replication, make sure we stay healthy
    utils.run_vtctl(['StartSlave', tablet_62044.tablet_alias])
    utils.run_vtctl(['RunHealthCheck', tablet_62044.tablet_alias])
    # make sure status web page is healthy
    self.assertRegexpMatches(tablet_62044.get_status(), healthy_expr)
    # now test VtTabletStreamHealth returns the right thing
    stdout, _ = utils.run_vtctl(['VtTabletStreamHealth',
                                 '-count', '2',
                                 tablet_62044.tablet_alias],
                                trap_output=True, auto_log=True)
    # Each streamed health record is one JSON document per line.
    lines = stdout.splitlines()
    self.assertEqual(len(lines), 2)
    for line in lines:
      logging.debug('Got health: %s', line)
      data = json.loads(line)
      self.assertIn('realtime_stats', data)
      self.assertIn('serving', data)
      self.assertTrue(data['serving'])
      self.assertNotIn('health_error', data['realtime_stats'])
      # TER timestamp is only expected on masters, not this replica.
      self.assertNotIn('tablet_externally_reparented_timestamp', data)
      self.assertEqual('test_keyspace', data['target']['keyspace'])
      self.assertEqual('0', data['target']['shard'])
      self.assertEqual(topodata_pb2.REPLICA, data['target']['tablet_type'])
    # Test that VtTabletStreamHealth reports a QPS >0.0.
    # Therefore, issue several reads first.
    # NOTE: This may be potentially flaky because we'll observe a QPS >0.0
    # exactly "once" for the duration of one sampling interval (5s) and
    # after that we'll see 0.0 QPS rates again. If this becomes actually
    # flaky, we need to read continuously in a separate thread.
    for _ in range(10):
      tablet_62044.execute('select 1 from dual')
    # This may take up to 5 seconds to become true because we sample the query
    # counts for the rates only every 5 seconds (see query_service_stats.go).
    timeout = 10
    while True:
      health = utils.run_vtctl_json(['VtTabletStreamHealth', '-count', '1',
                                     tablet_62044.tablet_alias])
      if health['realtime_stats'].get('qps', 0.0) > 0.0:
        break
      timeout = utils.wait_step('QPS >0.0 seen', timeout)
    # kill the tablets
    tablet.kill_tablets([tablet_62344, tablet_62044])
  def test_health_check_drained_state_does_not_shutdown_query_service(self):
    """Verifies a 'drained' tablet keeps its query service running.

    Mirrors the SplitClone vtworker flow: an rdonly tablet is switched to
    'drained' with replication stopped, and must remain healthy and SERVING
    for the duration; switching back to rdonly restores normal health.
    """
    # This test is similar to test_health_check, but has the following
    # differences:
    # - the second tablet is an 'rdonly' and not a 'replica'
    # - the second tablet will be set to 'drained' and we expect that
    #   the query service won't be shutdown
    # Setup master and rdonly tablets.
    tablet_62344.init_tablet('replica', 'test_keyspace', '0')
    for t in tablet_62344, tablet_62044:
      t.create_db('vt_test_keyspace')
    # Note we only have a master and a rdonly. So we can't enable
    # semi-sync in this case, as the rdonly slaves don't semi-sync ack.
    tablet_62344.start_vttablet(wait_for_state=None, enable_semi_sync=False)
    tablet_62044.start_vttablet(wait_for_state=None,
                                init_tablet_type='rdonly',
                                init_keyspace='test_keyspace',
                                init_shard='0',
                                enable_semi_sync=False)
    tablet_62344.wait_for_vttablet_state('NOT_SERVING')
    tablet_62044.wait_for_vttablet_state('NOT_SERVING')
    self.check_healthz(tablet_62044, False)
    # Enable replication.
    utils.run_vtctl(['InitShardMaster', '-force', 'test_keyspace/0',
                     tablet_62344.tablet_alias])
    # Trigger healthcheck to save time waiting for the next interval.
    utils.run_vtctl(['RunHealthCheck', tablet_62044.tablet_alias])
    tablet_62044.wait_for_vttablet_state('SERVING')
    self.check_healthz(tablet_62044, True)
    # Change from rdonly to drained and stop replication. (These
    # actions are similar to the SplitClone vtworker command
    # implementation.)  The tablet will stay healthy, and the
    # query service is still running.
    utils.run_vtctl(['ChangeSlaveType', tablet_62044.tablet_alias, 'drained'])
    utils.run_vtctl(['StopSlave', tablet_62044.tablet_alias])
    # Trigger healthcheck explicitly to avoid waiting for the next interval.
    utils.run_vtctl(['RunHealthCheck', tablet_62044.tablet_alias])
    utils.wait_for_tablet_type(tablet_62044.tablet_alias, 'drained')
    self.check_healthz(tablet_62044, True)
    # Query service is still running.
    tablet_62044.wait_for_vttablet_state('SERVING')
    # Restart replication. Tablet will become healthy again.
    utils.run_vtctl(['ChangeSlaveType', tablet_62044.tablet_alias, 'rdonly'])
    utils.run_vtctl(['StartSlave', tablet_62044.tablet_alias])
    utils.run_vtctl(['RunHealthCheck', tablet_62044.tablet_alias])
    self.check_healthz(tablet_62044, True)
    # kill the tablets
    tablet.kill_tablets([tablet_62344, tablet_62044])
  def test_no_mysql_healthcheck(self):
    """This test starts a vttablet with no mysql port, while mysql is down.

    It makes sure vttablet will start properly and be unhealthy.
    Then we start mysql, and make sure vttablet becomes healthy.
    """
    # we need replication to be enabled, so the slave tablet can be healthy.
    for t in tablet_62344, tablet_62044:
      t.create_db('vt_test_keyspace')
    pos = mysql_flavor().master_position(tablet_62344)
    # Use 'localhost' as hostname because Travis CI worker hostnames
    # are too long for MySQL replication.
    change_master_cmds = mysql_flavor().change_master_commands(
        'localhost',
        tablet_62344.mysql_port,
        pos)
    tablet_62044.mquery('', ['RESET MASTER', 'RESET SLAVE'] +
                        change_master_cmds + ['START SLAVE'])
    # now shutdown all mysqld
    shutdown_procs = [
        tablet_62344.shutdown_mysql(),
        tablet_62044.shutdown_mysql(),
        ]
    utils.wait_procs(shutdown_procs)
    # start the tablets, wait for them to be NOT_SERVING (mysqld not there)
    tablet_62344.init_tablet('master', 'test_keyspace', '0')
    tablet_62044.init_tablet('replica', 'test_keyspace', '0',
                             include_mysql_port=False)
    for t in tablet_62344, tablet_62044:
      # Since MySQL is down at this point and we want the tablet to start up
      # successfully, we have to use supports_backups=False.
      t.start_vttablet(wait_for_state=None, supports_backups=False,
                       full_mycnf_args=True, include_mysql_port=False)
    for t in tablet_62344, tablet_62044:
      t.wait_for_vttablet_state('NOT_SERVING')
      self.check_healthz(t, False)
    # Tell slave to not try to repair replication in healthcheck.
    # The StopSlave will ultimately fail because mysqld is not running,
    # But vttablet should remember that it's not supposed to fix replication.
    utils.run_vtctl(['StopSlave', tablet_62044.tablet_alias], expect_fail=True)
    # The above notice to not fix replication should survive tablet restart.
    tablet_62044.kill_vttablet()
    tablet_62044.start_vttablet(wait_for_state='NOT_SERVING',
                                full_mycnf_args=True, include_mysql_port=False,
                                supports_backups=False)
    # restart mysqld
    start_procs = [
        tablet_62344.start_mysql(),
        tablet_62044.start_mysql(),
        ]
    utils.wait_procs(start_procs)
    # the master should still be healthy
    utils.run_vtctl(['RunHealthCheck', tablet_62344.tablet_alias],
                    auto_log=True)
    self.check_healthz(tablet_62344, True)
    # the slave will now be healthy, but report a very high replication
    # lag, because it can't figure out what it exactly is.
    utils.run_vtctl(['RunHealthCheck', tablet_62044.tablet_alias],
                    auto_log=True)
    tablet_62044.wait_for_vttablet_state('SERVING')
    self.check_healthz(tablet_62044, True)
    health = utils.run_vtctl_json(['VtTabletStreamHealth',
                                   '-count', '1',
                                   tablet_62044.tablet_alias])
    self.assertTrue('seconds_behind_master' in health['realtime_stats'])
    # NOTE(review): 7200 looks like the sentinel lag vttablet reports when
    # replication state is unknown -- confirm against the health reporter.
    self.assertEqual(health['realtime_stats']['seconds_behind_master'], 7200)
    self.assertIn('serving', health)
    # restart replication, wait until health check goes small
    # (a value of zero is default and won't be in structure)
    utils.run_vtctl(['StartSlave', tablet_62044.tablet_alias])
    timeout = 10
    while True:
      utils.run_vtctl(['RunHealthCheck', tablet_62044.tablet_alias],
                      auto_log=True)
      health = utils.run_vtctl_json(['VtTabletStreamHealth',
                                     '-count', '1',
                                     tablet_62044.tablet_alias])
      if 'serving' in health and (
          ('seconds_behind_master' not in health['realtime_stats']) or
          (health['realtime_stats']['seconds_behind_master'] < 30)):
        break
      timeout = utils.wait_step('health delay goes back down', timeout)
    # wait for the tablet to fix its mysql port
    for t in tablet_62344, tablet_62044:
      # wait for mysql port to show up
      timeout = 10
      while True:
        ti = utils.run_vtctl_json(['GetTablet', t.tablet_alias])
        if 'mysql' in ti['port_map']:
          break
        timeout = utils.wait_step('mysql port in tablet record', timeout)
      self.assertEqual(ti['port_map']['mysql'], t.mysql_port)
    # all done
    tablet.kill_tablets([tablet_62344, tablet_62044])
  def test_repeated_init_shard_master(self):
    """Test that using InitShardMaster can go back and forth between 2 hosts.

    Runs InitShardMaster three times (A, then B, then A again), each time
    verifying both tablets stay healthy and their topology types match the
    chosen master/replica assignment.
    """
    for t in tablet_62344, tablet_62044:
      t.create_db('vt_test_keyspace')
      t.start_vttablet(wait_for_state=None,
                       lameduck_period='5s',
                       init_tablet_type='replica',
                       init_keyspace='test_keyspace',
                       init_shard='0')
    # Tablets are not replicating, so they won't be healthy.
    for t in tablet_62344, tablet_62044:
      t.wait_for_vttablet_state('NOT_SERVING')
      self.check_healthz(t, False)
    # Pick one master out of the two.
    utils.run_vtctl(['InitShardMaster', '-force', 'test_keyspace/0',
                     tablet_62344.tablet_alias])
    # Run health check on both, make sure they are both healthy.
    # Also make sure the types are correct.
    for t in tablet_62344, tablet_62044:
      utils.run_vtctl(['RunHealthCheck', t.tablet_alias], auto_log=True)
      self.check_healthz(t, True)
    # timeout=0: the type must already be correct, no polling.
    utils.wait_for_tablet_type(tablet_62344.tablet_alias, 'master', timeout=0)
    utils.wait_for_tablet_type(tablet_62044.tablet_alias, 'replica', timeout=0)
    # Pick the other one as master, make sure they are still healthy.
    utils.run_vtctl(['InitShardMaster', '-force', 'test_keyspace/0',
                     tablet_62044.tablet_alias])
    # Run health check on both, make sure they are both healthy.
    # Also make sure the types are correct.
    for t in tablet_62344, tablet_62044:
      utils.run_vtctl(['RunHealthCheck', t.tablet_alias], auto_log=True)
      self.check_healthz(t, True)
    utils.wait_for_tablet_type(tablet_62344.tablet_alias, 'replica', timeout=0)
    utils.wait_for_tablet_type(tablet_62044.tablet_alias, 'master', timeout=0)
    # Come back to the original guy.
    utils.run_vtctl(['InitShardMaster', '-force', 'test_keyspace/0',
                     tablet_62344.tablet_alias])
    # Run health check on both, make sure they are both healthy.
    # Also make sure the types are correct.
    for t in tablet_62344, tablet_62044:
      utils.run_vtctl(['RunHealthCheck', t.tablet_alias], auto_log=True)
      self.check_healthz(t, True)
    utils.wait_for_tablet_type(tablet_62344.tablet_alias, 'master', timeout=0)
    utils.wait_for_tablet_type(tablet_62044.tablet_alias, 'replica', timeout=0)
    # And done.
    tablet.kill_tablets([tablet_62344, tablet_62044])
def test_fallback_policy(self):
tablet_62344.create_db('vt_test_keyspace')
tablet_62344.init_tablet('master', 'test_keyspace', '0')
tablet_62344.start_vttablet(security_policy='bogus')
f = urllib.urlopen('http://localhost:%d/queryz' % int(tablet_62344.port))
response = f.read()
f.close()
self.assertIn('not allowed', response)
tablet_62344.kill_vttablet()
  def test_ignore_health_error(self):
    """Checks IgnoreHealthError can force a tablet healthy and back.

    A replica with no replication running is unhealthy ('no slave status');
    IgnoreHealthError with a matching regexp forces it to SERVING, and
    clearing the pattern returns it to NOT_SERVING.
    """
    tablet_62344.create_db('vt_test_keyspace')
    # Starts unhealthy because of "no slave status" (not replicating).
    tablet_62344.start_vttablet(wait_for_state='NOT_SERVING',
                                init_tablet_type='replica',
                                init_keyspace='test_keyspace',
                                init_shard='0')
    # Force it healthy.
    utils.run_vtctl(['IgnoreHealthError', tablet_62344.tablet_alias,
                     '.*no slave status.*'])
    utils.run_vtctl(['RunHealthCheck', tablet_62344.tablet_alias],
                    auto_log=True)
    tablet_62344.wait_for_vttablet_state('SERVING')
    self.check_healthz(tablet_62344, True)
    # Turn off the force-healthy.
    # (an empty pattern clears the ignore rule)
    utils.run_vtctl(['IgnoreHealthError', tablet_62344.tablet_alias, ''])
    utils.run_vtctl(['RunHealthCheck', tablet_62344.tablet_alias],
                    auto_log=True)
    tablet_62344.wait_for_vttablet_state('NOT_SERVING')
    self.check_healthz(tablet_62344, False)
    tablet_62344.kill_vttablet()
  def test_master_restart_sets_ter_timestamp(self):
    """Test that TER timestamp is set when we restart the MASTER vttablet.

    TER = TabletExternallyReparented.
    See StreamHealthResponse.tablet_externally_reparented_timestamp for details.
    """
    master, replica = tablet_62344, tablet_62044
    tablets = [master, replica]
    # Start vttablets. Our future master is initially a REPLICA.
    for t in tablets:
      t.create_db('vt_test_keyspace')
    for t in tablets:
      t.start_vttablet(wait_for_state='NOT_SERVING',
                       init_tablet_type='replica',
                       init_keyspace='test_keyspace',
                       init_shard='0')
    # Initialize tablet as MASTER.
    utils.run_vtctl(['InitShardMaster', '-force', 'test_keyspace/0',
                     master.tablet_alias])
    master.wait_for_vttablet_state('SERVING')
    # Capture the current TER.
    health = utils.run_vtctl_json(['VtTabletStreamHealth',
                                   '-count', '1',
                                   master.tablet_alias])
    self.assertEqual(topodata_pb2.MASTER, health['target']['tablet_type'])
    self.assertIn('tablet_externally_reparented_timestamp', health)
    self.assertGreater(health['tablet_externally_reparented_timestamp'], 0,
                       'TER on MASTER must be set after InitShardMaster')
    # Restart the MASTER vttablet.
    # (init_tablet_type is 'replica', but the topology still records this
    # tablet as the shard master, so it comes back SERVING as MASTER.)
    master.kill_vttablet()
    master.start_vttablet(wait_for_state='SERVING',
                          init_tablet_type='replica',
                          init_keyspace='test_keyspace',
                          init_shard='0')
    # Make sure that the TER increased i.e. it was set to the current time.
    health_after_restart = utils.run_vtctl_json(['VtTabletStreamHealth',
                                                 '-count', '1',
                                                 master.tablet_alias])
    self.assertEqual(topodata_pb2.MASTER,
                     health_after_restart['target']['tablet_type'])
    self.assertIn('tablet_externally_reparented_timestamp',
                  health_after_restart)
    self.assertGreater(
        health_after_restart['tablet_externally_reparented_timestamp'],
        health['tablet_externally_reparented_timestamp'],
        'When the MASTER vttablet was restarted, the TER timestamp must be set'
        ' to the current time.')
    # Shutdown.
    for t in tablets:
      t.kill_vttablet()
if __name__ == '__main__':
  # Delegate to the shared Vitess test harness (flag parsing, environment
  # setup/teardown, and unittest discovery).
  utils.main()
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import logging
import time
from telemetry.core import util
class ActionNotSupported(Exception):
  """Raised when a requested device gesture or action is not available."""
class AndroidActionRunner(object):
  """Provides an API for interacting with an android device.

  This makes use of functionality provided by the android input command. None
  of the gestures here are guaranteed to be performant for telemetry tests and
  there is no official support for this API.

  All methods shell out to `adb shell input ...` via the platform backend's
  device object; there is no return-value checking on those commands.

  TODO(ariblue): Replace this API with a better implementation for interacting
  with native components.
  """

  def __init__(self, platform_backend):
    # platform_backend must expose .device (with RunShellCommand),
    # .IsScreenOn() and .IsScreenLocked().
    self._platform_backend = platform_backend

  def SmoothScrollBy(self, left_start_coord, top_start_coord, direction,
                     scroll_distance):
    """Performs a swipe gesture that scrolls the page in `direction`.

    Args:
      left_start_coord: Horizontal start coordinate of the gesture.
      top_start_coord: Vertical start coordinate of the gesture.
      direction: One of 'down', 'up', 'left', 'right' (page direction).
      scroll_distance: Distance to scroll, in pixels.

    Raises:
      ActionNotSupported: If direction is not one of the four values above.
    """
    if direction not in ['down', 'up', 'left', 'right']:
      raise ActionNotSupported('Invalid scroll direction: %s' % direction)
    # This velocity is slower so that the exact distance we specify is the
    # distance the page travels.
    # NOTE(review): duration is set equal to the pixel distance (so ~1px/ms);
    # confirm this still yields exact-distance scrolls on target devices.
    duration = scroll_distance
    # Note that the default behavior is swiping up for scrolling down.
    # i.e. the finger moves opposite to the page direction.
    if direction == 'down':
      left_end_coord = left_start_coord
      top_end_coord = top_start_coord - scroll_distance
    elif direction == 'up':
      left_end_coord = left_start_coord
      top_end_coord = top_start_coord + scroll_distance
    elif direction == 'right':
      left_end_coord = left_start_coord - scroll_distance
      top_end_coord = top_start_coord
    elif direction == 'left':
      left_end_coord = left_start_coord + scroll_distance
      top_end_coord = top_start_coord
    self.InputSwipe(left_start_coord, top_start_coord, left_end_coord,
                    top_end_coord, duration)

  def Wait(self, seconds):
    """Wait for the number of seconds specified.

    Args:
      seconds: The number of seconds to wait.
    """
    time.sleep(seconds)

  def InputText(self, string):
    """Convert the characters of the string into key events and send to device.

    NOTE(review): the string is interpolated into the shell command unquoted,
    so spaces or shell metacharacters in `string` will be mangled by the
    device shell -- confirm callers only pass simple tokens.

    Args:
      string: The string to send to the device.
    """
    self._platform_backend.device.RunShellCommand('input text %s' % string)

  def InputKeyEvent(self, key):
    """Send a single key input to the device.

    Args:
      key: A key code number or name that will be sent to the device
    """
    self._platform_backend.device.RunShellCommand('input keyevent %s' % key)

  def InputTap(self, x_coord, y_coord):
    """Perform a tap input at the given coordinates.

    Args:
      x_coord: The x coordinate of the tap event.
      y_coord: The y coordinate of the tap event.
    """
    self._platform_backend.device.RunShellCommand('input tap %s %s' % (x_coord,
                                                                       y_coord))

  def InputSwipe(self, left_start_coord, top_start_coord, left_end_coord,
                 top_end_coord, duration):
    """Perform a swipe input.

    Args:
      left_start_coord: The horizontal starting coordinate of the gesture
      top_start_coord: The vertical starting coordinate of the gesture
      left_end_coord: The horizontal ending coordinate of the gesture
      top_end_coord: The vertical ending coordinate of the gesture
      duration: The length of time of the swipe in milliseconds
    """
    self._platform_backend.device.RunShellCommand(
        'input swipe %s %s %s %s %s' % (left_start_coord, top_start_coord,
                                        left_end_coord, top_end_coord,
                                        duration))

  def InputPress(self):
    """Perform a press input."""
    self._platform_backend.device.RunShellCommand('input press')

  def InputRoll(self, dx, dy):
    """Perform a roll input. This sends a simple zero-pressure move event.

    Args:
      dx: Change in the x coordinate due to move.
      dy: Change in the y coordinate due to move.
    """
    self._platform_backend.device.RunShellCommand('input roll %s %s' % (dx, dy))

  def EnsureScreenOn(self):
    """If device screen is off, turn screen on.

    If the screen is already on, return immediately.

    Raises:
      Timeout: If the screen is off and device fails to turn screen on.
    """
    if self._platform_backend.IsScreenOn():
      return
    self._ToggleScreenOn()
    util.WaitFor(self._platform_backend.IsScreenOn, 5)

  def TurnScreenOn(self):
    """If device screen is off, turn screen on.

    If the screen is already on, log a warning and return immediately.

    Raises:
      Timeout: If the screen is off and device fails to turn screen on.
    """
    if not self._platform_backend.IsScreenOn():
      self._ToggleScreenOn()
    else:
      logging.warning('Screen on when expected off.')
      return
    util.WaitFor(self._platform_backend.IsScreenOn, 5)

  def TurnScreenOff(self):
    """If device screen is on, turn screen off.

    If the screen is already off, log a warning and return immediately.

    Raises:
      Timeout: If the screen is on and device fails to turn screen off.
    """
    def is_screen_off():
      return not self._platform_backend.IsScreenOn()
    if self._platform_backend.IsScreenOn():
      self._ToggleScreenOn()
    else:
      logging.warning('Screen off when expected on.')
      return
    util.WaitFor(is_screen_off, 5)

  def UnlockScreen(self):
    """If device screen is locked, unlocks it.

    If the device is not locked, log a warning and return immediately.

    Raises:
      Timeout: If device fails to unlock screen.
    """
    def is_screen_unlocked():
      return not self._platform_backend.IsScreenLocked()
    if self._platform_backend.IsScreenLocked():
      # keyevent 82 is KEYCODE_MENU, which dismisses a non-secure keyguard.
      self._platform_backend.device.RunShellCommand('input keyevent 82')
    else:
      logging.warning('Screen not locked when expected.')
      return
    util.WaitFor(is_screen_unlocked, 5)

  def _ToggleScreenOn(self):
    # keyevent 26 is KEYCODE_POWER, which toggles the screen state.
    self._platform_backend.device.RunShellCommand('input keyevent 26')
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package kubelet
import (
"os"
"path/filepath"
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
v1 "k8s.io/api/core/v1"
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
"k8s.io/kubernetes/pkg/volume/util/hostutil"
"k8s.io/kubernetes/pkg/volume/util/subpath"
"k8s.io/kubernetes/test/utils/ktesting"
)
// TestMakeMountsWindows exercises makeMounts with Windows-style volume
// mounts: drive-letter paths pass through unchanged, rooted paths such as
// `\mnt\path6` and `/mnt/path7` are normalized onto the C: drive, named
// pipes are left untouched, and a kubelet-managed etc-hosts mount is
// appended because the pod uses host networking.
func TestMakeMountsWindows(t *testing.T) {
	logger, _ := ktesting.NewTestContext(t)
	// TODO: remove skip once the failing test has been fixed.
	t.Skip("Skip failing test on Windows.")
	container := v1.Container{
		VolumeMounts: []v1.VolumeMount{
			{
				MountPath: "c:/etc/hosts",
				Name:      "disk",
				ReadOnly:  false,
			},
			{
				MountPath: "c:/mnt/path3",
				Name:      "disk",
				ReadOnly:  true,
			},
			{
				MountPath: "c:/mnt/path4",
				Name:      "disk4",
				ReadOnly:  false,
			},
			{
				MountPath: "c:/mnt/path5",
				Name:      "disk5",
				ReadOnly:  false,
			},
			{
				MountPath: `\mnt\path6`,
				Name:      "disk6",
				ReadOnly:  false,
			},
			{
				MountPath: `/mnt/path7`,
				Name:      "disk7",
				ReadOnly:  false,
			},
			{
				MountPath: `\\.\pipe\pipe1`,
				Name:      "pipe1",
				ReadOnly:  false,
			},
		},
	}
	// Each volume name maps to the host path its stub Mounter reports.
	podVolumes := kubecontainer.VolumeMap{
		"disk":  kubecontainer.VolumeInfo{Mounter: &stubVolume{path: "c:/mnt/disk"}},
		"disk4": kubecontainer.VolumeInfo{Mounter: &stubVolume{path: "c:/mnt/host"}},
		"disk5": kubecontainer.VolumeInfo{Mounter: &stubVolume{path: "c:/var/lib/kubelet/podID/volumes/empty/disk5"}},
		"disk6": kubecontainer.VolumeInfo{Mounter: &stubVolume{path: `/mnt/disk6`}},
		"disk7": kubecontainer.VolumeInfo{Mounter: &stubVolume{path: `\mnt\disk7`}},
		"pipe1": kubecontainer.VolumeInfo{Mounter: &stubVolume{path: `\\.\pipe\pipe1`}},
	}
	// HostNetwork=true makes makeMounts add the managed etc-hosts mount.
	pod := v1.Pod{
		Spec: v1.PodSpec{
			HostNetwork: true,
		},
	}
	fhu := hostutil.NewFakeHostUtil(nil)
	fsp := &subpath.FakeSubpath{}
	podDir, err := os.MkdirTemp("", "test-rotate-logs")
	require.NoError(t, err)
	defer os.RemoveAll(podDir)
	mounts, _, err := makeMounts(logger, &pod, podDir, &container, "fakepodname", "", []string{""}, podVolumes, fhu, fsp, nil, false, nil)
	require.NoError(t, err)
	expectedMounts := []kubecontainer.Mount{
		{
			Name:           "disk",
			ContainerPath:  "c:/etc/hosts",
			HostPath:       "c:/mnt/disk",
			ReadOnly:       false,
			SELinuxRelabel: false,
		},
		{
			Name:           "disk",
			ContainerPath:  "c:/mnt/path3",
			HostPath:       "c:/mnt/disk",
			ReadOnly:       true,
			SELinuxRelabel: false,
		},
		{
			Name:           "disk4",
			ContainerPath:  "c:/mnt/path4",
			HostPath:       "c:/mnt/host",
			ReadOnly:       false,
			SELinuxRelabel: false,
		},
		{
			Name:           "disk5",
			ContainerPath:  "c:/mnt/path5",
			HostPath:       "c:/var/lib/kubelet/podID/volumes/empty/disk5",
			ReadOnly:       false,
			SELinuxRelabel: false,
		},
		{
			Name:           "disk6",
			ContainerPath:  `c:\mnt\path6`,
			HostPath:       `c:/mnt/disk6`,
			ReadOnly:       false,
			SELinuxRelabel: false,
		},
		{
			Name:           "disk7",
			ContainerPath:  `c:/mnt/path7`,
			HostPath:       `c:\mnt\disk7`,
			ReadOnly:       false,
			SELinuxRelabel: false,
		},
		{
			Name:           "pipe1",
			ContainerPath:  `\\.\pipe\pipe1`,
			HostPath:       `\\.\pipe\pipe1`,
			ReadOnly:       false,
			SELinuxRelabel: false,
		},
		{
			Name:           "k8s-managed-etc-hosts",
			ContainerPath:  `C:\Windows\System32\drivers\etc\hosts`,
			HostPath:       filepath.Join(podDir, "etc-hosts"),
			ReadOnly:       false,
			SELinuxRelabel: true,
		},
	}
	assert.Equal(t, expectedMounts, mounts, "mounts of container %+v", container)
}
"""
Django settings for mello project.
Generated by 'django-admin startproject' using Django 1.8.3.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.8/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'e=2e4enq17+h@nh!zjp4!pdik4x6r&t1-8*i61p7-w8@5k)#7g'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = False
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'project',
'member',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.security.SecurityMiddleware',
)
ROOT_URLCONF = 'mello.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'mello.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.8/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.8/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.8/howto/static-files/
STATIC_URL = '/static/'
#----------------------------------------------------------------
AUTH_USER_MODEL = 'member.Member'
MEDIA_ROOT = os.path.join(BASE_DIR, 'upload') | unknown | codeparrot/codeparrot-clean | ||
/*
* Copyright (C) 2009 The Guava Authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.google.common.collect;
import java.util.SortedSet;
/**
 * GWT emulation of {@link RegularImmutableSortedSet}.
 *
 * <p>Unlike the server-side implementation, this emulation simply wraps a
 * {@link SortedSet} delegate supplied by the superclass.
 *
 * @author Hayward Chan
 */
final class RegularImmutableSortedSet<E> extends ImmutableSortedSet<E> {

  /** true if this set is a subset of another immutable sorted set. */
  final boolean isSubset;

  /**
   * @param delegate backing sorted set; assumed to be effectively immutable
   * @param isSubset whether this set is a view-subset of another immutable sorted set
   */
  RegularImmutableSortedSet(SortedSet<E> delegate, boolean isSubset) {
    super(delegate);
    this.isSubset = isSubset;
  }

  @Override
  ImmutableList<E> createAsList() {
    // Snapshot the elements once; the as-list view preserves sorted order.
    return new ImmutableSortedAsList<>(this, ImmutableList.asImmutableList(toArray()));
  }
}
# -*- coding: utf-8 -*-
#
# PySPED - Python libraries to deal with Brazil's SPED Project
#
# Copyright (C) 2010-2012
# Copyright (C) Aristides Caldeira <aristides.caldeira at tauga.com.br>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# PySPED - Bibliotecas Python para o
# SPED - Sistema Público de Escrituração Digital
#
# Copyright (C) 2010-2012
# Copyright (C) Aristides Caldeira <aristides.caldeira arroba tauga.com.br>
#
# Este programa é um software livre: você pode redistribuir e/ou modificar
# este programa sob os termos da licença GNU Affero General Public License,
# publicada pela Free Software Foundation, em sua versão 3 ou, de acordo
# com sua opção, qualquer versão posterior.
#
# Este programa é distribuido na esperança de que venha a ser útil,
# porém SEM QUAISQUER GARANTIAS, nem mesmo a garantia implícita de
# COMERCIABILIDADE ou ADEQUAÇÃO A UMA FINALIDADE ESPECÍFICA. Veja a
# GNU Affero General Public License para mais detalhes.
#
# Você deve ter recebido uma cópia da GNU Affero General Public License
# juntamente com este programa. Caso esse não seja o caso, acesse:
# <http://www.gnu.org/licenses/>
#
from __future__ import division, print_function, unicode_literals

from pysped.xml_sped import *
from pysped.nfe.leiaute import ESQUEMA_ATUAL_VERSAO_3 as ESQUEMA_ATUAL
from pysped.nfe.leiaute import consstatserv_200
import os

# Directory of this module; used to locate the bundled XSD schema files.
DIRNAME = os.path.dirname(__file__)
class ConsStatServ(consstatserv_200.ConsStatServ):
    """NF-e service-status request message, layout version 3.10.

    Extends the 2.00 layout, overriding the root version tag and pointing
    schema validation at the 3.10 XSD bundled with this package.
    """

    def __init__(self):
        super(ConsStatServ, self).__init__()
        # Root tag carries the layout version as an XML attribute.
        self.versao = TagDecimal(nome='consStatServ', codigo='FP01',
                                 propriedade='versao',
                                 namespace=NAMESPACE_NFE,
                                 valor='3.10', raiz='/')
        schema_dir = os.path.join(DIRNAME, 'schema', ESQUEMA_ATUAL + '/')
        self.caminho_esquema = schema_dir
        self.arquivo_esquema = 'consStatServ_v3.10.xsd'
class RetConsStatServ(consstatserv_200.RetConsStatServ):
    """NF-e service-status response message, layout version 3.10.

    Extends the 2.00 layout, overriding the root version tag and the two
    timestamp tags, and pointing schema validation at the 3.10 XSD.
    """

    def __init__(self):
        super(RetConsStatServ, self).__init__()
        response_root = '//retConsStatServ'
        self.versao = TagDecimal(nome='retConsStatServ', codigo='FR01',
                                 propriedade='versao',
                                 namespace=NAMESPACE_NFE,
                                 valor='3.10', raiz='/')
        # Receipt timestamp (mandatory) and return-estimate timestamp
        # (optional), both UTC datetime tags.
        self.dhRecbto = TagDataHoraUTC(nome='dhRecbto', codigo='FR08',
                                       raiz=response_root)
        self.dhRetorno = TagDataHoraUTC(nome='dhRetorno', codigo='FR10',
                                        raiz=response_root, obrigatorio=False)
        schema_dir = os.path.join(DIRNAME, 'schema', ESQUEMA_ATUAL + '/')
        self.caminho_esquema = schema_dir
        self.arquivo_esquema = 'retConsStatServ_v3.10.xsd'
# Copyright 2014 The Swarming Authors. All rights reserved.
# Use of this source code is governed by the Apache v2.0 license that can be
# found in the LICENSE file.
"""Top-level presubmit script for auth server.
See http://dev.chromium.org/developers/how-tos/depottools/presubmit-scripts for
details on the presubmit API built into gcl.
"""
def FindAppEngineSDK(input_api):
"""Returns an absolute path to AppEngine SDK (or None if not found)."""
import sys
old_sys_path = sys.path
try:
# Add 'components' to sys.path to be able to import gae_sdk_utils.
components_dir = input_api.os_path.join(
input_api.PresubmitLocalPath(), '..', 'components')
sys.path = [components_dir] + sys.path
# pylint: disable=F0401
from support import gae_sdk_utils
return gae_sdk_utils.find_gae_sdk()
finally:
sys.path = old_sys_path
def CommonChecks(input_api, output_api):
output = []
def join(*args):
return input_api.os_path.join(input_api.PresubmitLocalPath(), *args)
gae_sdk_path = FindAppEngineSDK(input_api)
if not gae_sdk_path:
output.append(output_api.PresubmitError('Couldn\'t find AppEngine SDK.'))
return output
import sys
old_sys_path = sys.path
try:
# Add GAE SDK modules to sys.path.
sys.path = [gae_sdk_path] + sys.path
import appcfg
appcfg.fix_sys_path()
# Add project specific paths to sys.path
sys.path = [
join('components', 'third_party'),
join('tests'),
# See tests/test_env.py for more information.
join('..', 'components'),
join('..', 'components', 'third_party'),
] + sys.path
black_list = list(input_api.DEFAULT_BLACK_LIST) + [
r'.*_pb2\.py$',
]
disabled_warnings = [
]
output.extend(input_api.canned_checks.RunPylint(
input_api, output_api,
black_list=black_list,
disabled_warnings=disabled_warnings))
finally:
sys.path = old_sys_path
tests = input_api.canned_checks.GetUnitTestsInDirectory(
input_api, output_api,
join('tests'),
whitelist=[r'.+_test\.py$'])
output.extend(input_api.RunTests(tests, parallel=True))
return output
def CheckChangeOnUpload(input_api, output_api):
return CommonChecks(input_api, output_api)
def CheckChangeOnCommit(input_api, output_api):
return CommonChecks(input_api, output_api) | unknown | codeparrot/codeparrot-clean | ||
## begin license ##
#
# "Meresco Examples" is a project demonstrating some of the
# features of various components of the "Meresco Suite".
# Also see http://meresco.org.
#
# Copyright (C) 2012-2016 Seecr (Seek You Too B.V.) http://seecr.nl
#
# This file is part of "Meresco Examples"
#
# "Meresco Examples" is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# "Meresco Examples" is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with "Meresco Examples"; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
#
## end license ##
from os import getuid
assert getuid() != 0, "Do not run tests as 'root'"
from seecrdeps import includeParentAndDeps #DO_NOT_DISTRIBUTE
includeParentAndDeps(__file__, scanForDeps=True) #DO_NOT_DISTRIBUTE
from seecr.test.testrunner import TestRunner
from _integration import ExampleIntegrationState
if __name__ == '__main__':
runner = TestRunner()
ExampleIntegrationState(
"default",
tests=[
'_integration.gatewaytest.GatewayTest',
'_integration.apitest.ApiTest',
],
fastMode=runner.fastMode).addToTestRunner(runner)
runner.run() | unknown | codeparrot/codeparrot-clean | ||
// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: BUSL-1.1
package s3
import (
"encoding/json"
"errors"
"fmt"
"net/url"
"regexp"
"strings"
"time"
"github.com/aws/aws-sdk-go-v2/aws"
"github.com/aws/aws-sdk-go-v2/aws/arn"
"github.com/hashicorp/terraform/internal/tfdiags"
"github.com/zclconf/go-cty/cty"
)
const (
multiRegionKeyIdPattern = `mrk-[a-f0-9]{32}`
uuidRegexPattern = `[a-f0-9]{8}-[a-f0-9]{4}-[1-5][a-f0-9]{3}-[ab89][a-f0-9]{3}-[a-f0-9]{12}`
aliasRegexPattern = `alias/[a-zA-Z0-9/_-]+`
)
func validateKMSKey(path cty.Path, s string) (diags tfdiags.Diagnostics) {
if arn.IsARN(s) {
return validateKMSKeyARN(path, s)
}
return validateKMSKeyID(path, s)
}
func validateKMSKeyID(path cty.Path, s string) (diags tfdiags.Diagnostics) {
keyIdRegex := regexp.MustCompile(`^` + uuidRegexPattern + `|` + multiRegionKeyIdPattern + `|` + aliasRegexPattern + `$`)
if !keyIdRegex.MatchString(s) {
diags = diags.Append(tfdiags.AttributeValue(
tfdiags.Error,
"Invalid KMS Key ID",
fmt.Sprintf("Value must be a valid KMS Key ID, got %q", s),
path,
))
return diags
}
return diags
}
func validateKMSKeyARN(path cty.Path, s string) (diags tfdiags.Diagnostics) {
parsedARN, err := arn.Parse(s)
if err != nil {
diags = diags.Append(tfdiags.AttributeValue(
tfdiags.Error,
"Invalid KMS Key ARN",
fmt.Sprintf("Value must be a valid KMS Key ARN, got %q", s),
path,
))
return diags
}
if !isKeyARN(parsedARN) {
diags = diags.Append(tfdiags.AttributeValue(
tfdiags.Error,
"Invalid KMS Key ARN",
fmt.Sprintf("Value must be a valid KMS Key ARN, got %q", s),
path,
))
return diags
}
return diags
}
func isKeyARN(arn arn.ARN) bool {
return keyIdFromARNResource(arn.Resource) != "" || aliasIdFromARNResource(arn.Resource) != ""
}
func keyIdFromARNResource(s string) string {
keyIdResourceRegex := regexp.MustCompile(`^key/(` + uuidRegexPattern + `|` + multiRegionKeyIdPattern + `)$`)
matches := keyIdResourceRegex.FindStringSubmatch(s)
if matches == nil || len(matches) != 2 {
return ""
}
return matches[1]
}
func aliasIdFromARNResource(s string) string {
aliasIdResourceRegex := regexp.MustCompile(`^(` + aliasRegexPattern + `)$`)
matches := aliasIdResourceRegex.FindStringSubmatch(s)
if matches == nil || len(matches) != 2 {
return ""
}
return matches[1]
}
type stringValidator func(val string, path cty.Path, diags *tfdiags.Diagnostics)
func validateStringNotEmpty(val string, path cty.Path, diags *tfdiags.Diagnostics) {
val = strings.TrimSpace(val)
if len(val) == 0 {
*diags = diags.Append(attributeErrDiag(
"Invalid Value",
"The value cannot be empty or all whitespace",
path,
))
}
}
func validateStringLenBetween(min, max int) stringValidator {
return func(val string, path cty.Path, diags *tfdiags.Diagnostics) {
if l := len(val); l < min || l > max {
*diags = diags.Append(attributeErrDiag(
"Invalid Value Length",
fmt.Sprintf("Length must be between %d and %d, had %d", min, max, l),
path,
))
}
}
}
func validateStringMatches(re *regexp.Regexp, description string) stringValidator {
return func(val string, path cty.Path, diags *tfdiags.Diagnostics) {
if !re.MatchString(val) {
*diags = diags.Append(attributeErrDiag(
"Invalid Value",
description,
path,
))
}
}
}
func validateStringDoesNotContain(s string) stringValidator {
return func(val string, path cty.Path, diags *tfdiags.Diagnostics) {
if strings.Contains(val, s) {
*diags = diags.Append(attributeErrDiag(
"Invalid Value",
fmt.Sprintf(`Value must not contain "%s"`, s),
path,
))
}
}
}
func validateStringInSlice(sl []string) stringValidator {
return func(val string, path cty.Path, diags *tfdiags.Diagnostics) {
match := false
for _, s := range sl {
if val == s {
match = true
}
}
if !match {
*diags = diags.Append(attributeErrDiag(
"Invalid Value",
fmt.Sprintf("Value must be one of [%s]", strings.Join(sl, ", ")),
path,
))
}
}
}
// validateStringRetryMode ensures the provided value in a valid AWS retry mode
func validateStringRetryMode(val string, path cty.Path, diags *tfdiags.Diagnostics) {
_, err := aws.ParseRetryMode(val)
if err != nil {
*diags = diags.Append(attributeErrDiag(
"Invalid Value",
err.Error(),
path,
))
}
}
// S3 will strip leading slashes from an object, so while this will
// technically be accepted by S3, it will break our workspace hierarchy.
// S3 will recognize objects with a trailing slash as a directory
// so they should not be valid keys
func validateStringS3Path(val string, path cty.Path, diags *tfdiags.Diagnostics) {
if strings.HasPrefix(val, "/") || strings.HasSuffix(val, "/") {
*diags = diags.Append(attributeErrDiag(
"Invalid Value",
`The value must not start or end with "/"`,
path,
))
}
}
func validateARN(validators ...arnValidator) stringValidator {
return func(val string, path cty.Path, diags *tfdiags.Diagnostics) {
parsedARN, err := arn.Parse(val)
if err != nil {
*diags = diags.Append(attributeErrDiag(
"Invalid ARN",
fmt.Sprintf("The value %q cannot be parsed as an ARN: %s", val, err),
path,
))
return
}
for _, validator := range validators {
validator(parsedARN, path, diags)
}
}
}
// Copied from `ValidIAMPolicyJSON` (https://github.com/hashicorp/terraform-provider-aws/blob/ffd1c8a006dcd5a6b58a643df9cc147acb5b7a53/internal/verify/validate.go#L154)
func validateIAMPolicyDocument(val string, path cty.Path, diags *tfdiags.Diagnostics) {
// IAM Policy documents need to be valid JSON, and pass legacy parsing
val = strings.TrimSpace(val)
if first := val[:1]; first != "{" {
switch val[:1] {
case `"`:
// There are some common mistakes that lead to strings appearing
// here instead of objects, so we'll try some heuristics to
// check for those so we might give more actionable feedback in
// these situations.
var content string
var innerContent any
if err := json.Unmarshal([]byte(val), &content); err == nil {
if strings.HasSuffix(content, ".json") {
*diags = diags.Append(attributeErrDiag(
"Invalid IAM Policy Document",
fmt.Sprintf(`Expected a JSON object describing the policy, had a JSON-encoded string.
The string %q looks like a filename, please pass the contents of the file instead of the filename.`,
content,
),
path,
))
return
} else if err := json.Unmarshal([]byte(content), &innerContent); err == nil {
// hint = " (have you double-encoded your JSON data?)"
*diags = diags.Append(attributeErrDiag(
"Invalid IAM Policy Document",
`Expected a JSON object describing the policy, had a JSON-encoded string.
The string content was valid JSON, your policy document may have been double-encoded.`,
path,
))
return
}
}
*diags = diags.Append(attributeErrDiag(
"Invalid IAM Policy Document",
"Expected a JSON object describing the policy, had a JSON-encoded string.",
path,
))
default:
// Generic error for if we didn't find something more specific to say.
*diags = diags.Append(attributeErrDiag(
"Invalid IAM Policy Document",
"Expected a JSON object describing the policy",
path,
))
}
} else {
var j any
if err := json.Unmarshal([]byte(val), &j); err != nil {
errStr := err.Error()
var jsonErr *json.SyntaxError
if errors.As(err, &jsonErr) {
errStr += fmt.Sprintf(", at byte offset %d", jsonErr.Offset)
}
*diags = diags.Append(attributeErrDiag(
"Invalid JSON Document",
fmt.Sprintf("The JSON document contains an error: %s", errStr),
path,
))
}
}
}
func validateStringKMSKey(val string, path cty.Path, diags *tfdiags.Diagnostics) {
ds := validateKMSKey(path, val)
*diags = diags.Append(ds)
}
// validateStringLegacyURL validates that a string can be parsed generally as a URL, but does
// not ensure that the URL is valid.
func validateStringLegacyURL(val string, path cty.Path, diags *tfdiags.Diagnostics) {
u, err := url.Parse(val)
if err != nil {
*diags = diags.Append(attributeErrDiag(
"Invalid Value",
fmt.Sprintf("The value %q cannot be parsed as a URL: %s", val, err),
path,
))
return
}
if u.Scheme == "" || u.Host == "" {
*diags = diags.Append(legacyIncompleteURLDiag(val, path))
return
}
}
func legacyIncompleteURLDiag(val string, path cty.Path) tfdiags.Diagnostic {
return attributeWarningDiag(
"Complete URL Expected",
fmt.Sprintf(`The value should be a valid URL containing at least a scheme and hostname. Had %q.
Using an incomplete URL, such as a hostname only, may work, but may have unexpected behavior.`, val),
path,
)
}
// validateStringValidURL validates that a URL is a valid URL, inclding a scheme and host
func validateStringValidURL(val string, path cty.Path, diags *tfdiags.Diagnostics) {
u, err := url.Parse(val)
if err != nil {
*diags = diags.Append(attributeErrDiag(
"Invalid Value",
fmt.Sprintf("The value %q cannot be parsed as a URL: %s", val, err),
path,
))
return
}
if u.Scheme == "" || u.Host == "" {
*diags = diags.Append(invalidURLDiag(val, path))
return
}
}
func invalidURLDiag(val string, path cty.Path) tfdiags.Diagnostic {
return attributeErrDiag(
"Invalid Value",
fmt.Sprintf("The value must be a valid URL containing at least a scheme and hostname. Had %q", val),
path,
)
}
// Using a val of `cty.ValueSet` would be better here, but we can't get an ElementIterator from a ValueSet
type setValidator func(val cty.Value, path cty.Path, diags *tfdiags.Diagnostics)
func validateSetStringElements(validators ...stringValidator) setValidator {
return func(val cty.Value, path cty.Path, diags *tfdiags.Diagnostics) {
typ := val.Type()
if eltTyp := typ.ElementType(); eltTyp != cty.String {
*diags = diags.Append(attributeErrDiag(
"Internal Error",
fmt.Sprintf(`Expected type to be %s, got: %s`, cty.Set(cty.String).FriendlyName(), val.Type().FriendlyName()),
path,
))
return
}
eltPath := make(cty.Path, len(path)+1)
copy(eltPath, path)
idxIdx := len(path)
iter := val.ElementIterator()
for iter.Next() {
idx, elt := iter.Element()
eltPath[idxIdx] = cty.IndexStep{Key: idx}
for _, validator := range validators {
validator(elt.AsString(), eltPath, diags)
}
}
}
}
type arnValidator func(val arn.ARN, path cty.Path, diags *tfdiags.Diagnostics)
func validateIAMRoleARN(val arn.ARN, path cty.Path, diags *tfdiags.Diagnostics) {
if !strings.HasPrefix(val.Resource, "role/") {
*diags = diags.Append(attributeErrDiag(
"Invalid IAM Role ARN",
fmt.Sprintf("Value must be a valid IAM Role ARN, got %q", val),
path,
))
}
}
func validateIAMPolicyARN(val arn.ARN, path cty.Path, diags *tfdiags.Diagnostics) {
if !strings.HasPrefix(val.Resource, "policy/") {
*diags = diags.Append(attributeErrDiag(
"Invalid IAM Policy ARN",
fmt.Sprintf("Value must be a valid IAM Policy ARN, got %q", val),
path,
))
}
}
func validateDuration(validators ...durationValidator) stringValidator {
return func(val string, path cty.Path, diags *tfdiags.Diagnostics) {
duration, err := time.ParseDuration(val)
if err != nil {
*diags = diags.Append(attributeErrDiag(
"Invalid Duration",
fmt.Sprintf("The value %q cannot be parsed as a duration: %s", val, err),
path,
))
return
}
for _, validator := range validators {
validator(duration, path, diags)
}
}
}
type durationValidator func(val time.Duration, path cty.Path, diags *tfdiags.Diagnostics)
func validateDurationBetween(min, max time.Duration) durationValidator {
return func(val time.Duration, path cty.Path, diags *tfdiags.Diagnostics) {
if val < min || val > max {
*diags = diags.Append(attributeErrDiag(
"Invalid Duration",
fmt.Sprintf("Duration must be between %s and %s, had %s", min, max, val),
path,
))
}
}
}
type objectValidator func(obj cty.Value, objPath cty.Path, diags *tfdiags.Diagnostics)
func validateAttributesConflict(paths ...cty.Path) objectValidator {
return func(obj cty.Value, objPath cty.Path, diags *tfdiags.Diagnostics) {
found := false
for _, path := range paths {
val, err := path.Apply(obj)
if err != nil {
*diags = diags.Append(attributeErrDiag(
"Invalid Path for Schema",
"The S3 Backend unexpectedly provided a path that does not match the schema. "+
"Please report this to the developers.\n\n"+
"Path: "+pathString(path)+"\n\n"+
"Error:"+err.Error(),
objPath,
))
continue
}
if !val.IsNull() {
if found {
pathStrs := make([]string, len(paths))
for i, path := range paths {
pathStrs[i] = pathString(path)
}
*diags = diags.Append(invalidAttributeCombinationDiag(objPath, paths))
} else {
found = true
}
}
}
}
}
func validateExactlyOneOfAttributes(paths ...cty.Path) objectValidator {
return func(obj cty.Value, objPath cty.Path, diags *tfdiags.Diagnostics) {
var localDiags tfdiags.Diagnostics
found := make(map[string]cty.Path, len(paths))
for _, path := range paths {
val, err := path.Apply(obj)
if err != nil {
localDiags = localDiags.Append(attributeErrDiag(
"Invalid Path for Schema",
"The S3 Backend unexpectedly provided a path that does not match the schema. "+
"Please report this to the developers.\n\n"+
"Path: "+pathString(path)+"\n\n"+
"Error:"+err.Error(),
objPath,
))
continue
}
if !val.IsNull() {
found[pathString(path)] = path
}
}
*diags = diags.Append(localDiags)
if len(found) > 1 {
*diags = diags.Append(invalidAttributeCombinationDiag(objPath, paths))
return
}
if len(found) == 0 && !localDiags.HasErrors() {
pathStrs := make([]string, len(paths))
for i, path := range paths {
pathStrs[i] = pathString(path)
}
*diags = diags.Append(attributeErrDiag(
"Missing Required Value",
fmt.Sprintf(`Exactly one of %s must be set.`, strings.Join(pathStrs, ", ")),
objPath,
))
}
}
}
func invalidAttributeCombinationDiag(objPath cty.Path, paths []cty.Path) tfdiags.Diagnostic {
pathStrs := make([]string, len(paths))
for i, path := range paths {
pathStrs[i] = pathString(path)
}
return attributeErrDiag(
"Invalid Attribute Combination",
fmt.Sprintf(`Only one of %s can be set.`, strings.Join(pathStrs, ", ")),
objPath,
)
}
func attributeErrDiag(summary, detail string, attrPath cty.Path) tfdiags.Diagnostic {
return tfdiags.AttributeValue(tfdiags.Error, summary, detail, attrPath.Copy())
}
func attributeWarningDiag(summary, detail string, attrPath cty.Path) tfdiags.Diagnostic {
return tfdiags.AttributeValue(tfdiags.Warning, summary, detail, attrPath.Copy())
}
func wholeBodyErrDiag(summary, detail string) tfdiags.Diagnostic {
return tfdiags.WholeContainingBody(tfdiags.Error, summary, detail)
}
func wholeBodyWarningDiag(summary, detail string) tfdiags.Diagnostic {
return tfdiags.WholeContainingBody(tfdiags.Warning, summary, detail)
}
var assumeRoleNameValidator = []stringValidator{
validateStringLenBetween(2, 64),
validateStringMatches(
regexp.MustCompile(`^[\w+=,.@\-]*$`),
`Value can only contain letters, numbers, or the following characters: =,.@-`,
),
} | go | github | https://github.com/hashicorp/terraform | internal/backend/remote-state/s3/validate.go |
from django.core.exceptions import PermissionDenied
from django.template.response import TemplateResponse
from django.test import SimpleTestCase, modify_settings, override_settings
from django.urls import path
class MiddlewareAccessingContent:
def __init__(self, get_response):
self.get_response = get_response
def __call__(self, request):
response = self.get_response(request)
# Response.content should be available in the middleware even with a
# TemplateResponse-based exception response.
assert response.content
return response
def template_response_error_handler(request, exception=None):
return TemplateResponse(request, "test_handler.html", status=403)
def permission_denied_view(request):
raise PermissionDenied
urlpatterns = [
path("", permission_denied_view),
]
handler403 = template_response_error_handler
@override_settings(ROOT_URLCONF="handlers.tests_custom_error_handlers")
@modify_settings(
MIDDLEWARE={
"append": "handlers.tests_custom_error_handlers.MiddlewareAccessingContent"
}
)
class CustomErrorHandlerTests(SimpleTestCase):
def test_handler_renders_template_response(self):
"""
BaseHandler should render TemplateResponse if necessary.
"""
response = self.client.get("/")
self.assertContains(response, "Error handler content", status_code=403) | python | github | https://github.com/django/django | tests/handlers/tests_custom_error_handlers.py |
from unittest import TestCase, main
from socketio.namespace import BaseNamespace
from socketio.virtsocket import Socket
class MockSocketIOServer(object):
"""Mock a SocketIO server"""
def __init__(self, *args, **kwargs):
self.sockets = {}
def get_socket(self, socket_id=''):
return self.sockets.get(socket_id)
class MockSocketIOhandler(object):
"""Mock a SocketIO handler"""
def __init__(self, *args, **kwargs):
self.server = MockSocketIOServer()
class MockNamespace(BaseNamespace):
"""Mock a Namespace from the namespace module"""
pass
class TestSocketAPI(TestCase):
"""Test the virtual Socket object"""
def setUp(self):
self.server = MockSocketIOServer()
self.virtsocket = Socket(self.server, {})
def test__set_namespaces(self):
namespaces = {'/': MockNamespace}
self.virtsocket._set_namespaces(namespaces)
self.assertEqual(self.virtsocket.namespaces, namespaces)
def test__set_request(self):
request = {'test': 'a'}
self.virtsocket._set_request(request)
self.assertEqual(self.virtsocket.request, request)
def test__set_environ(self):
environ = []
self.virtsocket._set_environ(environ)
self.assertEqual(self.virtsocket.environ, environ)
def test_connected_property(self):
# not connected
self.assertFalse(self.virtsocket.connected)
# connected
self.virtsocket.state = "CONNECTED"
self.assertTrue(self.virtsocket.connected)
def test_incr_hist(self):
self.virtsocket.state = "CONNECTED"
# cause a hit
self.virtsocket.incr_hits()
self.assertEqual(self.virtsocket.hits, 1)
self.assertEqual(self.virtsocket.state, self.virtsocket.STATE_CONNECTED)
def test_disconnect(self):
# kill connected socket
self.virtsocket.state = "CONNECTED"
self.virtsocket.active_ns = {'test' : MockNamespace({'socketio': self.virtsocket}, 'test')}
self.virtsocket.disconnect()
self.assertEqual(self.virtsocket.state, "DISCONNECTING")
self.assertEqual(self.virtsocket.active_ns, {})
def test_kill(self):
# kill connected socket
self.virtsocket.state = "CONNECTED"
self.virtsocket.active_ns = {'test' : MockNamespace({'socketio': self.virtsocket}, 'test')}
self.virtsocket.kill()
self.assertEqual(self.virtsocket.state, "DISCONNECTING")
def test__receiver_loop(self):
"""Test the loop """
# most of the method is tested by test_packet.TestDecode and
# test_namespace.TestBaseNamespace
pass
# self.virtsocket._receiver_loop()
# self.virtsocket.server_queue.put_nowait_msg('2::')
if __name__ == '__main__':
main() | unknown | codeparrot/codeparrot-clean | ||
from configparser import ConfigParser
import argparse
import os
from contactsdk.connector import Connector
def parse_input():
parser = argparse.ArgumentParser()
parser.add_argument("config_file", type=str, help="Configuration file")
args = parser.parse_args()
return args
def program():
print "*********************************************"
print "* Welcome to CONTACT-TOOLS SDK Command Line *"
print "*********************************************"
parsed_input = parse_input()
parser = ConfigParser()
parser.read(os.path.expanduser(parsed_input.config_file))
access_token = parser.get('secret', 'access_token')
base_url = parser.get('secret', 'base_url')
endpoint = parser.get('secret', 'endpoint')
# Create Connector
connector = Connector(access_token, base_url, endpoint)
print "\n#### Read Configuration from %s ####" % parsed_input.config_file
print connector
def main():
try:
program()
except Exception as e:
print e
if __name__ == "__main__":
main() | unknown | codeparrot/codeparrot-clean | ||
# Copyright 2014 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import gc
import weakref
import mock
from oslo.config import cfg
from neutron.db import agentschedulers_db
from neutron import manager
from neutron.tests import base
from neutron.tests import fake_notifier
class PluginSetupHelper(object):
"""Mixin for use with testtools.TestCase."""
def cleanup_core_plugin(self):
"""Ensure that the core plugin is deallocated."""
nm = manager.NeutronManager
if not nm.has_instance():
return
# TODO(marun) Fix plugins that do not properly initialize notifiers
agentschedulers_db.AgentSchedulerDbMixin.agent_notifiers = {}
# Perform a check for deallocation only if explicitly
# configured to do so since calling gc.collect() after every
# test increases test suite execution time by ~50%.
check_plugin_deallocation = (
base.bool_from_env('OS_CHECK_PLUGIN_DEALLOCATION'))
if check_plugin_deallocation:
plugin = weakref.ref(nm._instance.plugin)
nm.clear_instance()
if check_plugin_deallocation:
gc.collect()
# TODO(marun) Ensure that mocks are deallocated?
if plugin() and not isinstance(plugin(), mock.Base):
self.fail('The plugin for this test was not deallocated.')
def setup_coreplugin(self, core_plugin=None):
# Plugin cleanup should be triggered last so that
# test-specific cleanup has a chance to release references.
self.addCleanup(self.cleanup_core_plugin)
if core_plugin is not None:
cfg.CONF.set_override('core_plugin', core_plugin)
class NotificationSetupHelper(object):
"""Mixin for use with testtools.TestCase."""
def setup_notification_driver(self, notification_driver=None):
self.addCleanup(fake_notifier.reset)
if notification_driver is None:
notification_driver = [fake_notifier.__name__]
cfg.CONF.set_override("notification_driver", notification_driver) | unknown | codeparrot/codeparrot-clean | ||
#!/usr/bin/env python
"""
Module graph
Implements a directed acyclic graph (DAG) which will throw an error
when a circular dependancy is created. A topological sort is used to determine
what nodes can be run first.
"""
import logging
logger = logging.getLogger(__name__)
class Node:
def __init__(self, name):
self.name = name
self.inputs = {}
self.outputs = {}
def __str__(self):
return self.name
def print_edges(self):
print "%s:\tINPUTS: %s" % (self.name, map(str, self.inputs.keys()))
print "%s:\tOUTPUTS: %s" % (self.name, map(str, self.outputs.keys()))
class Graph:
def __init__(self):
self.nodes = {}
def add(self, name, input=None, inputs=None, output=None, outputs=None):
# node = Node(name)
node, created = self.get_or_create(name)
# Create edges if specified, either by name or obj.
if input:
self.add_edge(input, name)
if output:
self.add_edge(name, output)
if inputs:
for o in inputs:
self.add_edge(o, name)
if outputs:
for o in outputs:
self.add_edge(name, o)
self.nodes[name] = node
# print node.inputs
# print node.outputs
return node
def add_edge(self, start_node, end_node):
start, created = self.get_or_create(start_node)
end, created = self.get_or_create(end_node)
start.outputs[end.name] = end
end.inputs[start.name] = start
def get(self, name):
return self.nodes[name]
# if name in self.nodes:
# return self.nodes[name]
# else:
# return None
def get_or_create(self, name):
if name in self.nodes:
return (self.nodes[name], True)
else:
# node = self.add(name)
node = Node(name)
self.nodes[name] = node
return (node, True)
def list(self):
"""returns an toplogical sorted list of elements"""
return self.nodes | unknown | codeparrot/codeparrot-clean | ||
# *****************************************************************************
# Copyright (c) 2019 IBM Corporation and other Contributors.
#
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Eclipse Public License v1.0
# which accompanies this distribution, and is available at
# http://www.eclipse.org/legal/epl-v10.html
# *****************************************************************************
import json
import os
import testUtils
import pytest
from wiotp.sdk import InvalidEventException, JsonCodec
class DummyPahoMessage(object):
def __init__(self, object):
self.payload = bytearray()
try:
self.payload.extend(json.dumps(object))
except:
# python 3
self.payload.extend(map(ord, json.dumps(object)))
class NonJsonDummyPahoMessage(object):
def __init__(self, object):
self.payload = bytearray()
try:
self.payload.extend(object)
except:
# python 3
self.payload.extend(map(ord, object))
class TestDevice(testUtils.AbstractTest):
def testJsonObject(self):
message = JsonCodec.decode(DummyPahoMessage({"foo": "bar"}))
assert isinstance(message.data, dict)
assert message.data["foo"] == "bar"
def testJsonString(self):
message = JsonCodec.decode(DummyPahoMessage("bar"))
try:
assert isinstance(message.data, unicode)
except NameError as e:
# Python 3
assert isinstance(message.data, str)
def testJsonBoolean(self):
message = JsonCodec.decode(DummyPahoMessage(False))
assert isinstance(message.data, bool)
def testJsonInt(self):
message = JsonCodec.decode(DummyPahoMessage(1))
assert isinstance(message.data, int)
def testInvalidJson(self):
with pytest.raises(InvalidEventException):
message = JsonCodec.decode(NonJsonDummyPahoMessage("{sss,eee}")) | unknown | codeparrot/codeparrot-clean | ||
/*
* Copyright (C) 2012 The Guava Authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.google.common.eventbus.outside;
import com.google.common.eventbus.EventBus;
import junit.framework.TestCase;
import org.jspecify.annotations.Nullable;
/**
* Abstract base class for tests that EventBus finds the correct subscribers.
*
* <p>The actual tests are distributed among the other classes in this package based on whether they
* are annotated or abstract in the superclass.
*
* <p>This test must be outside the c.g.c.eventbus package to test correctly.
*
* @author Louis Wasserman
*/
abstract class AbstractEventBusTest<H> extends TestCase {
static final Object EVENT = new Object();
abstract H createSubscriber();
private @Nullable H subscriber;
H getSubscriber() {
return subscriber;
}
@Override
protected void setUp() throws Exception {
subscriber = createSubscriber();
EventBus bus = new EventBus();
bus.register(subscriber);
bus.post(EVENT);
}
@Override
protected void tearDown() throws Exception {
subscriber = null;
}
} | java | github | https://github.com/google/guava | android/guava-tests/test/com/google/common/eventbus/outside/AbstractEventBusTest.java |
'use strict';
const common = require('../common.js');
const bench = common.createBenchmark(main, {
size: [16, 512, 4096, 16386],
args: [1, 2, 5],
n: [1e6],
});
function main({ n, size, args }) {
const b0 = Buffer.alloc(size, 'a');
const b1 = Buffer.alloc(size, 'a');
const b0Len = b0.length;
const b1Len = b1.length;
b1[size - 1] = 'b'.charCodeAt(0);
switch (args) {
case 2:
b0.compare(b1, 0);
bench.start();
for (let i = 0; i < n; i++) {
b0.compare(b1, 0);
}
bench.end(n);
break;
case 3:
b0.compare(b1, 0, b1Len);
bench.start();
for (let i = 0; i < n; i++) {
b0.compare(b1, 0, b1Len);
}
bench.end(n);
break;
case 4:
b0.compare(b1, 0, b1Len, 0);
bench.start();
for (let i = 0; i < n; i++) {
b0.compare(b1, 0, b1Len, 0);
}
bench.end(n);
break;
case 5:
b0.compare(b1, 0, b1Len, 0, b0Len);
bench.start();
for (let i = 0; i < n; i++) {
b0.compare(b1, 0, b1Len, 0, b0Len);
}
bench.end(n);
break;
default:
b0.compare(b1);
bench.start();
for (let i = 0; i < n; i++) {
b0.compare(b1);
}
bench.end(n);
}
} | javascript | github | https://github.com/nodejs/node | benchmark/buffers/buffer-compare-instance-method.js |
#!/usr/bin/env python
import json
import logging
import socket
import sys
import time
import traceback
import gflags
import webapp2
from actions.compressor import CompressorToggle
from actions.compressor import State
from actions.dispense_cup import DispenseCup, ReleaseCup
from actions.home import Home
from actions.led import SetLedForValve
from actions.meter_bitters import MeterBitters
from actions.meter_simple import MeterSimple as Meter
#from actions.meter_dead_reckoned import MeterDeadReckoned as Meter
from actions.move import Move
from actions.pressurize import HoldPressure, ReleasePressure
from config import ingredients
from config import valve_position
from controller import Controller
from drinks import manual_db
from drinks.actions_for_recipe import actions_for_recipe
from drinks.random_drinks import RandomSourDrink, RandomSpirituousDrink, RandomBubblySourDrink, RandomBubblySpirituousDrink
from drinks.recipe import Recipe
from drinks.water_down import water_down_recipe
from fake_robot import FakeRobot
from actions.slam_stir import STIR_POSITION, SlamStir
import poll_appengine
FLAGS = gflags.FLAGS
# Directory containing the HTML templates read by GetTemplate().
TEMPLATE_DIR = "templates/"
# Root directory for files served by StaticFileHandler.
STATIC_FILE_DIR = "static/"
# Global hardware handles; assigned in main() (FakeRobot or PhysicalRobot).
robot = None
controller = None
# Carriage position (robot units) the carriage moves to when dispensing a cup.
CUP_DISPENSE_POSITION = -3.465
class CustomJsonEncoder(json.JSONEncoder):
  """JSON encoder that serializes selected robot classes by attribute dict.

  Objects whose class name is in _SERIALIZABLE are emitted in the form
  {"__ClassName__": obj.__dict__}; anything else defers to the base
  encoder, which raises TypeError for unsupported types.
  """

  _SERIALIZABLE = frozenset(
      ['ActionException', 'LoadCellMonitor', 'TareTimeout', 'MeterTimeout'])

  def default(self, obj):
    if (hasattr(obj, '__class__')
        and obj.__class__.__name__ in self._SERIALIZABLE):
      return {'__%s__' % obj.__class__.__name__: obj.__dict__}
    return json.JSONEncoder.default(self, obj)
def GetTemplate(filename):
  """Return the contents of a template file under TEMPLATE_DIR.

  Args:
    filename: Template file name relative to TEMPLATE_DIR.

  Returns:
    The file's contents as a string.
  """
  # Use a context manager so the handle is closed promptly instead of
  # leaking until garbage collection (the old open(...).read() leaked it).
  with open(TEMPLATE_DIR + filename) as f:
    return f.read()
def ServeFile(filename):
  """Return a webapp2 handler class that serves a single fixed file.

  Args:
    filename: Path of the file served on every GET request.
  """
  class ServeFileImpl(webapp2.RequestHandler):
    def get(self):
      # Close the handle per request; the previous open(...).read()
      # leaked one file descriptor per hit until GC ran.
      with open(filename) as f:
        self.response.write(f.read())
  return ServeFileImpl
class LoadCellJson(webapp2.RequestHandler):
  """Serves the last ten seconds of load-cell samples as a JSON array."""

  def get(self):
    # Each sample is rendered as a [timestamp, value] pair.
    samples = ['[%s, %f]' % rec for rec in robot.load_cell.recent_secs(10)]
    self.response.write('[' + ','.join(samples) + ']')
class DrinkDbHandler(webapp2.RequestHandler):
  """Serves the drink database as a JavaScript snippet defining `db`."""

  def get(self):
    self.response.content_type = 'application/javascript'
    # NOTE(review): drink names containing a single quote would break the
    # emitted JS literal — assumes names are quote-free; confirm upstream.
    names = [r.name for r in manual_db.db]
    self.response.write("db = ['%s'];" % "','".join(names))
class InspectQueueJson(webapp2.RequestHandler):
  """Serves the action queue state, plus the last error, as JSON."""

  def get(self):
    payload = {
        'actions': [a.inspect() for a in controller.InspectQueue()],
        'exception': controller.last_exception,
    }
    self.response.write(json.dumps(payload, cls=CustomJsonEncoder))
class InspectQueue(webapp2.RequestHandler):
  """Renders a human-readable HTML view of the action queue."""

  def get(self):
    queued = controller.InspectQueue()
    lines = []
    if not queued:
      lines.append("Queue is empty")
    else:
      # One line per action name, followed by indented argument lines.
      for action in queued:
        info = action.inspect()
        lines.append(info['name'])
        for key, value in info['args'].items():
          lines.append('\t%s: %s' % (key, value))
    self.response.write(GetTemplate('queue.html').format(
        exception=controller.last_exception,
        content='\n'.join(lines),
        robot_dict=robot.__dict__))
META_REFRESH = """
<html>
<head>
<title>{msg}</title>
<meta http-equiv="refresh" content="2;URL={url}">
</head>
<body>
{msg}
</body>
</html>
"""
class RetryQueue(webapp2.RequestHandler):
  """Retries the failed action, if any, then bounces back to /queue."""

  def post(self):
    if controller.last_exception:
      controller.Retry()
    body = META_REFRESH.format(msg="Retrying...", url="/queue")
    self.response.write(body)
class ClearQueue(webapp2.RequestHandler):
  """Clears the action queue and resumes, then bounces back to /queue."""

  def post(self):
    controller.ClearAndResume()
    body = META_REFRESH.format(msg="Cleared...", url="/queue")
    self.response.write(body)
class SkipQueue(webapp2.RequestHandler):
  """Skips the failed action, if any, then bounces back to /queue."""

  def post(self):
    if controller.last_exception:
      controller.SkipAndResume()
    body = META_REFRESH.format(msg="Skipped...", url="/queue")
    self.response.write(body)
class StaticFileHandler(webapp2.RequestHandler):
"""Serve static files out of STATIC_FILE_DIR."""
def get(self):
if '.svg' in self.request.path:
self.response.content_type = 'application/svg+xml'
elif '.png' in self.request.path:
self.response.content_type = 'image/png'
elif '.jpg' in self.request.path:
self.response.content_type = 'image/jpg'
elif '.js' in self.request.path:
self.response.content_type = 'application/javascript'
elif '.css' in self.request.path:
self.response.content_type = 'text/css'
relative_path = self.to_relative_path(self.request.path)
path = STATIC_FILE_DIR + relative_path
try:
logging.debug("%s => %s", self.request.path, path)
self.response.write(open(path).read())
except IOError:
print "404 could not load: %s" % path
self.response.status = 404
def to_relative_path(self, path):
if len(path) > 0 and path[0] == "/":
return path[1:]
def SingleActionHandler(action):
  """Create a handler for queueing the given action class"""
  class Handler(webapp2.RequestHandler):
    def post(self):
      # Instantiate the action fresh on every POST and queue it alone.
      controller.EnqueueGroup([action()])
      self.response.write("%s action queued." % action.__name__)
  return Handler
# Maps the special "random" drink names to their recipe factory classes.
_RANDOM_RECIPES = {
    "Random Sour": RandomSourDrink,
    "Random Boozy": RandomSpirituousDrink,
    "Random Bubbly Boozy": RandomBubblySpirituousDrink,
    "Random Bubbly Sour": RandomBubblySourDrink,
}


def recipe_from_json_object(recipe_obj):
  """Takes a dict decoded from a JSON recipe and returns a Recipe object.

  If the decoded recipe has no ingredients and its drink_name is one of
  the random drinks, a freshly generated random recipe is substituted
  (preserving the requesting user's name). The result is watered down
  before being returned.
  """
  recipe = Recipe.from_json(recipe_obj)
  if not recipe.ingredients:
    # Dispatch table replaces the old repetitive elif chain.
    factory = _RANDOM_RECIPES.get(recipe_obj['drink_name'])
    if factory is not None:
      recipe = factory()
      recipe.user_name = recipe_obj['user_name']
  return water_down_recipe(recipe)
class AllDrinksHandler(webapp2.RequestHandler):
def get(self):
drinks = []
for drink in manual_db.LiveDB():
data = drink.json
data['image'] = drink.name.replace(' ', '_').lower() + '.jpg'
drinks.append(data)
self.response.write(json.dumps(drinks))
print "responding to drinks request"
class DrinkHandler(webapp2.RequestHandler):
  """Queues a drink either by name or as one of the random recipes.

  Responds 400 when neither a known drink name nor a valid random type
  is supplied.
  """

  # Maps the 'random' form values to their recipe factory classes.
  _RANDOM_FACTORIES = {
      'bubbly sour': RandomBubblySourDrink,
      'bubbly boozy': RandomBubblySpirituousDrink,
      'sour': RandomSourDrink,
      'boozy': RandomSpirituousDrink,
  }

  def post(self):
    name = self.request.get('name')
    if name:
      for drink in manual_db.db:
        if drink.name.lower() == name.lower():
          self.response.write("Making drink %s" % drink)
          controller.EnqueueGroup(actions_for_recipe(drink))
          return
    else:
      factory = self._RANDOM_FACTORIES.get(self.request.get('random'))
      if factory is not None:
        controller.EnqueueGroup(actions_for_recipe(factory()))
        # BUG FIX: the old code fell through and set status 400 even
        # after successfully queueing a random drink.
        return
    self.response.status = 400
class PrimeHandler(webapp2.RequestHandler):
  """Queues a 'Prime' recipe that pushes a small amount (0.2 oz) of every
  non-air ingredient through its line."""

  def post(self):
    prime_ingredients = [
        manual_db.Ingredient(manual_db.Oz(.2), name)
        for name in ingredients.IngredientsOrdered()[:]
        if name != "air"
    ]
    recipe = manual_db.Recipe(
        name='Prime',
        ingredients=prime_ingredients,
        user_name="dev console")
    controller.EnqueueGroup(actions_for_recipe(recipe))
class FlushHandler(webapp2.RequestHandler):
  """Queues a flush cycle: meters one drop through every non-air valve.

  LEDs track progress: red = pending, green = in progress, light blue =
  done; all LEDs are cleared after the carriage homes.
  """

  def post(self):
    flush_ingredients = [
        manual_db.Ingredient(manual_db.Oz(.725), ingredient)
        for ingredient in ingredients.IngredientsOrdered()
        if ingredient != "air"
    ]
    # Resolve each ingredient's valve exactly once and process in valve
    # order (the old code re-ran the lookup in the sort key and in each
    # of the three loops below).
    valves = sorted(
        ingredients.IngredientNameToValvePosition(i.name, "Flush")
        for i in flush_ingredients)
    actions = []
    for valve in valves:
      actions.append(SetLedForValve(valve, 255, 0, 0))
    for valve in valves:
      actions.append(Move(valve_position(valve)))
      actions.append(SetLedForValve(valve, 0, 255, 0))
      actions.append(MeterBitters(valve_to_actuate=valve,
                                  drops_to_meter=1))
      actions.append(SetLedForValve(valve, 0, 128, 255))
    actions.append(Move(0.0))
    actions.append(Home(carefully=False))
    for valve in valves:
      actions.append(SetLedForValve(valve, 0, 0, 0))
    controller.EnqueueGroup(actions)
class HoldPressureHandler(webapp2.RequestHandler):
def post(self):
print "Hold pressure."
controller.EnqueueGroup([HoldPressure()])
class SlamStirHandler(webapp2.RequestHandler):
def post(self):
print "Slam stir."
controller.EnqueueGroup([
Move(STIR_POSITION),
SlamStir(),
])
class ReleasePressureHandler(webapp2.RequestHandler):
  """Queues an action that vents system pressure."""

  def post(self):
    actions = [ReleasePressure()]
    controller.EnqueueGroup(actions)
class DispenseCupHandler(webapp2.RequestHandler):
  """Queues a single cup-dispense action."""

  def post(self):
    actions = [DispenseCup()]
    controller.EnqueueGroup(actions)
class DispenseCupFullTestHandler(webapp2.RequestHandler):
  """Full cup-handling test: dispense a cup, move to valve 0, release."""

  def post(self):
    sequence = [
        Move(CUP_DISPENSE_POSITION),
        DispenseCup(),
        Move(valve_position(0)),
        ReleaseCup(),
    ]
    controller.EnqueueGroup(sequence)
class FillHandler(webapp2.RequestHandler):
def post(self):
print "FILL HANDLER"
try:
args = self.request.get('text').replace(" ", "").partition(",")
valve = int(args[2])
oz = float(args[0])
controller.EnqueueGroup([
SetLedForValve(valve, 255, 0, 0), Move(valve_position(valve)),
SetLedForValve(valve, 0, 255, 0),
Meter(valve_to_actuate=valve, oz_to_meter=oz),
SetLedForValve(valve, 0, 128, 255)
])
except ValueError:
self.response.status = 400
self.response.write("valve and oz arguments are required.")
class Test1Handler(webapp2.RequestHandler):
def post(self):
print "TEST1 HANDLER"
try:
test_drink = manual_db.TEST_DRINK
controller.EnqueueGroup(actions_for_recipe(test_drink))
except ValueError:
self.response.status = 400
self.response.write("valve and oz arguments are required.")
class CustomDrinkHandler(webapp2.RequestHandler):
def post(self):
try:
recipe_obj = json.loads(self.request.get('recipe'))
recipe = recipe_from_json_object(recipe_obj)
print "Drink requested: %s", recipe
controller.EnqueueGroup(actions_for_recipe(recipe))
self.response.status = 200
self.response.write("ok")
except ValueError:
print 'Error parsing custom drink request: %s' % (
self.request.get('recipe', None))
traceback.print_exc()
self.response.status = 400
self.response.write("valve and oz arguments are required.")
class MoveHandler(webapp2.RequestHandler):
def post(self):
print self.request
controller.EnqueueGroup([Move(float(self.request.get('text')))])
class PausableWSGIApplication(webapp2.WSGIApplication):
  """WSGI application whose request handling can be paused.

  While `drop_all` is True, incoming requests block (polling once per
  second) until the flag is cleared, then proceed normally.
  """

  def __init__(self, routes=None, debug=False, config=None):
    super(PausableWSGIApplication, self).__init__(
        routes=routes, debug=debug, config=config)
    self.drop_all = False

  def __call__(self, environ, start_response):
    # Sleep-poll until unpaused.
    while self.drop_all:
      time.sleep(1.0)
    return super(PausableWSGIApplication, self).__call__(
        environ, start_response)
def StartServer(port, syncer):
  """Builds the WSGI route table and serves it (blocking) on `port`.

  Args:
    port: TCP port to listen on; bound on all interfaces.
    syncer: Optional frontend syncer (poll_appengine.SyncToServer);
        started just before the serving loop if provided.
  """
  from paste import httpserver
  logging.info("Starting server on port %d", port)
  #app = webapp2.WSGIApplication([
  app = PausableWSGIApplication([
      # User UI
      ('/all_drinks', AllDrinksHandler),
      ('/create_drink', CustomDrinkHandler),
      # User API
      ('/api/drink', DrinkHandler),
      # Debug UI
      ('/load_cell', ServeFile(STATIC_FILE_DIR + 'load_cell.html')),
      ('/load_cell.json', LoadCellJson),
      ('/queue', InspectQueue),
      ('/queue.json', InspectQueueJson),
      # Control API
      ('/queue-retry', RetryQueue),
      ('/queue-clear', ClearQueue),
      ('/queue-skip', SkipQueue),
      ('/display', ServeFile(STATIC_FILE_DIR + 'display.html')),
      # Debug API
      ('/api/calibrate', SingleActionHandler(Home)),
      ('/api/compressor-on',
       SingleActionHandler(lambda: CompressorToggle(State.ON))),
      ('/api/compressor-off',
       SingleActionHandler(lambda: CompressorToggle(State.OFF))),
      ('/api/prime', PrimeHandler),
      ('/api/flush', FlushHandler),
      ('/api/hold_pressure', HoldPressureHandler),
      ('/api/slam_stir', SlamStirHandler),
      ('/api/release_pressure', ReleasePressureHandler),
      ('/api/dispense_cup', DispenseCupHandler),
      ('/api/dispense_cup_full_test', DispenseCupFullTestHandler),
      ('/api/move.*', MoveHandler),
      ('/api/fill.*', FillHandler),
      ('/api/test1.*', Test1Handler),
      # Default to serving static files.
      ('/', ServeFile(STATIC_FILE_DIR + 'index.html')),
      ('/.*', StaticFileHandler),
  ])
  # Hand the controller a reference to the app so it can pause request
  # handling via PausableWSGIApplication.drop_all.
  controller.app = app
  # Pressurize the system up front before serving any requests.
  controller.EnqueueGroup([HoldPressure()])
  if syncer: syncer.start()
  print "serving at http://%s:%i" % (socket.gethostname(), port)
  httpserver.serve(app, host="0.0.0.0", port=port, start_loop=True)
def main():
FLAGS(sys.argv)
# Set up logging
logging.basicConfig(format='%(asctime)s %(message)s')
rootLogger = logging.getLogger()
rootLogger.setLevel(getattr(logging, FLAGS.loglevel.upper()))
if FLAGS.logfile:
rootLogger.addHandler(logging.FileHandler(FLAGS.logfile))
if FLAGS.logtostderr:
rootLogger.addHandler(logging.StreamHandler())
global robot
global controller
if FLAGS.fake:
robot = FakeRobot()
else:
from physical_robot import PhysicalRobot
robot = PhysicalRobot()
controller = Controller(robot)
syncer = None
if FLAGS.frontend:
print 'Polling frontend at --frontend=%s' % FLAGS.frontend
syncer = poll_appengine.SyncToServer(FLAGS.frontend + '/api/',
FLAGS.fe_poll_freq, controller)
StartServer(FLAGS.port, syncer)
if __name__ == "__main__":
  # Command-line flags must be defined before main() parses sys.argv.
  gflags.DEFINE_integer('port', 8000, 'Port to run on')
  gflags.DEFINE_bool('fake', False, 'Run with hardware faked out')
  gflags.DEFINE_string('logfile', '',
                       'File to log to. If empty, does not log to a file')
  gflags.DEFINE_boolean('logtostderr', False, 'Log to stderr instead of a file')
  gflags.DEFINE_enum('loglevel', 'info', ('debug', 'info', 'warning', 'error'),
                     'Log verbosity')
  gflags.DEFINE_string('frontend', '', 'Frontend server to sync with, if any')
  gflags.DEFINE_integer('fe_poll_freq', 5,
                        'Frontend polling frequency in seconds')
  main()
/*
* Copyright 2012-present the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* Field value parsing for the standard Java compiler.
*/
package org.springframework.boot.configurationprocessor.fieldvalues.javac; | java | github | https://github.com/spring-projects/spring-boot | configuration-metadata/spring-boot-configuration-processor/src/main/java/org/springframework/boot/configurationprocessor/fieldvalues/javac/package-info.java |
# Define routes
Routes serve as the fundamental building blocks for navigation within an Angular app.
## What are routes?
In Angular, a **route** is an object that defines which component should render for a specific URL path or pattern, as well as additional configuration options about what happens when a user navigates to that URL.
Here is a basic example of a route:
```ts
import {AdminPage} from './app-admin';
const adminPage = {
path: 'admin',
component: AdminPage,
};
```
For this route, when a user visits the `/admin` path, the app will display the `AdminPage` component.
### Managing routes in your application
Most projects define routes in a separate file that contains `routes` in the filename.
A collection of routes looks like this:
```ts
import {Routes} from '@angular/router';
import {HomePage} from './home-page';
import {AdminPage} from './admin-page';
export const routes: Routes = [
{
path: '',
component: HomePage,
},
{
path: 'admin',
component: AdminPage,
},
];
```
Tip: If you generated a project with Angular CLI, your routes are defined in `src/app/app.routes.ts`.
### Adding the router to your application
When bootstrapping an Angular application without the Angular CLI, you can pass a configuration object that includes a `providers` array.
Inside of the `providers` array, you can add the Angular router to your application by adding a `provideRouter` function call with your routes.
```ts
import {ApplicationConfig} from '@angular/core';
import {provideRouter} from '@angular/router';
import {routes} from './app.routes';
export const appConfig: ApplicationConfig = {
providers: [
provideRouter(routes),
// ...
],
};
```
## Route URL Paths
### Static URL Paths
Static URL Paths refer to routes with predefined paths that don't change based on dynamic parameters. These are routes that match a `path` string exactly and have a fixed outcome.
Examples of this include:
- "/admin"
- "/blog"
- "/settings/account"
### Define URL Paths with Route Parameters
Parameterized URLs allow you to define dynamic paths that allow multiple URLs to the same component while dynamically displaying data based on parameters in the URL.
You can define this type of pattern by adding parameters to your route’s `path` string and prefixing each parameter with the colon (`:`) character.
IMPORTANT: Parameters are distinct from information in the URL's [query string](https://en.wikipedia.org/wiki/Query_string).
Learn more about [query parameters in Angular in this guide](/guide/routing/read-route-state#query-parameters).
The following example displays a user profile component based on the user id passed in through the URL.
```ts
import {Routes} from '@angular/router';
import {UserProfile} from './user-profile/user-profile';
const routes: Routes = [{path: 'user/:id', component: UserProfile}];
```
In this example, URLs such as `/user/leeroy` and `/user/jenkins` render the `UserProfile` component. This component can then read the `id` parameter and use it to perform additional work, such as fetching data. See [reading route state guide](/guide/routing/read-route-state) for details on reading route parameters.
Valid route parameter names must start with a letter (a-z, A-Z) and can only contain:
- Letters (a-z, A-Z)
- Numbers (0-9)
- Underscore (\_)
- Hyphen (-)
You can also define paths with multiple parameters:
```ts
import {Routes} from '@angular/router';
import {UserProfile} from './user-profile';
import {SocialMediaFeed} from './social-media-feed';
const routes: Routes = [
{path: 'user/:id/:social-media', component: SocialMediaFeed},
{path: 'user/:id/', component: UserProfile},
];
```
With this new path, users can visit `/user/leeroy/youtube` and `/user/leeroy/bluesky` and see respective social media feeds based on the parameter for the user leeroy.
See [Reading route state](/guide/routing/read-route-state) for details on reading route parameters.
### Wildcards
When you need to catch all routes for a specific path, the solution is a wildcard route which is defined with the double asterisk (`**`).
A common example is defining a Page Not Found component.
```ts
import {Home} from './home/home';
import {UserProfile} from './user-profile';
import {NotFound} from './not-found';
const routes: Routes = [
{path: 'home', component: Home},
{path: 'user/:id', component: UserProfile},
{path: '**', component: NotFound},
];
```
In this routes array, the app displays the `NotFound` component when the user visits any path outside of `home` and `user/:id`.
Tip: Wildcard routes are typically placed at the end of a routes array.
## How Angular matches URLs
When you define routes, the order is important because Angular uses a first-match wins strategy. This means that once Angular matches a URL with a route `path`, it stops checking any further routes. As a result, always put more specific routes before less specific routes.
The following example shows routes defined from most-specific to least specific:
```ts
const routes: Routes = [
{path: '', component: Home}, // Empty path
{path: 'users/new', component: NewUser}, // Static, most specific
{path: 'users/:id', component: UserDetail}, // Dynamic
{path: 'users', component: Users}, // Static, less specific
{path: '**', component: NotFound}, // Wildcard - always last
];
```
If a user visits `/users/new`, Angular router would go through the following steps:
1. Checks `''` - doesn't match
1. Checks `users/new` - matches! Stops here
1. Never reaches `users/:id` even though it could match
1. Never reaches `users`
1. Never reaches `**`
## Route Loading Strategies
Understanding how and when routes and components load in Angular routing is crucial for building responsive web applications. Angular offers two primary strategies to control loading behavior:
1. **Eagerly loaded**: Routes and components that are loaded immediately
2. **Lazily loaded**: Routes and components loaded only when needed
Each approach offers distinct advantages for different scenarios.
### Eagerly loaded components
When you define a route with the `component` property, the referenced component is eagerly loaded as part of the same JavaScript bundle as the route configuration.
```ts
import {Routes} from '@angular/router';
import {HomePage} from './components/home/home-page';
import {LoginPage} from './components/auth/login-page';
export const routes: Routes = [
// HomePage and LoginPage are both directly referenced in this config,
// so their code is eagerly included in the same JavaScript bundle as this file.
{
path: '',
component: HomePage,
},
{
path: 'login',
component: LoginPage,
},
];
```
Eagerly loading route components like this means that the browser has to download and parse all of the JavaScript for these components as part of your initial page load, but the components are available to Angular immediately.
While including more JavaScript in your initial page load leads to slower initial load times, this can lead to more seamless transitions as the user navigates through an application.
### Lazily loaded components and routes
You can use the `loadComponent` property to lazily load the JavaScript for a component at the point at which that route would become active. The `loadChildren` property lazily loads child routes during route matching.
```ts
import {Routes} from '@angular/router';
export const routes: Routes = [
{
path: 'login',
loadComponent: () => import('./components/auth/login-page'),
},
{
path: 'admin',
loadComponent: () => import('./admin/admin.component'),
loadChildren: () => import('./admin/admin.routes'),
},
];
```
The `loadComponent` and `loadChildren` properties accept a loader function that returns a Promise that resolves to an Angular component or a set of routes respectively. In most cases, this function uses the standard [JavaScript dynamic import API](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Operators/import). You can, however, use any arbitrary async loader function.
If the lazily loaded file uses a `default` export, you can return the `import()` promise directly without an additional `.then` call to select the exported class.
Lazily loading routes can significantly improve the load speed of your Angular application by removing large portions of JavaScript from the initial bundle. These portions of your code compile into separate JavaScript "chunks" that the router requests only when the user visits the corresponding route.
### Injection context lazy loading
The Router executes `loadComponent` and `loadChildren` within the **injection context of the current route**, allowing you to call [`inject`](/api/core/inject) inside these loader functions to access providers declared on that route, inherited from parent routes through hierarchical dependency injection, or available globally. This enables context-aware lazy loading.
```ts
import {Routes} from '@angular/router';
import {inject} from '@angular/core';
import {FeatureFlags} from './feature-flags';
export const routes: Routes = [
{
path: 'dashboard',
// Runs inside the route's injection context
loadComponent: () => {
const flags = inject(FeatureFlags);
return flags.isPremium
? import('./dashboard/premium-dashboard')
: import('./dashboard/basic-dashboard');
},
},
];
```
### Should I use an eager or a lazy route?
There are many factors to consider when deciding on whether a route should be eager or lazy.
In general, eager loading is recommended for primary landing page(s) while other pages would be lazy-loaded.
NOTE: While lazy routes have the upfront performance benefit of reducing the amount of initial data requested by the user, it adds future data requests that could be undesirable. This is particularly true when dealing with nested lazy loading at multiple levels, which can significantly impact performance.
## Redirects
You can define a route that redirects to another route instead of rendering a component:
```ts
import {Blog} from './home/blog';
const routes: Routes = [
{
path: 'articles',
redirectTo: '/blog',
},
{
path: 'blog',
component: Blog,
},
];
```
If you modify or remove a route, some users may still click on out-of-date links or bookmarks to that route. You can add a redirect to direct those users to an appropriate alternative route instead of a "not found" page.
## Page titles
You can associate a **title** with each route. Angular automatically updates the [page title](https://developer.mozilla.org/en-US/docs/Web/HTML/Element/title) when a route activates. Always define appropriate page titles for your application, as these titles are necessary to create an accessible experience.
```ts
import {Routes} from '@angular/router';
import {Home} from './home';
import {About} from './about';
import {Products} from './products';
const routes: Routes = [
{
path: '',
component: Home,
title: 'Home Page',
},
{
path: 'about',
component: About,
title: 'About Us',
},
];
```
The page `title` property can be set dynamically to a resolver function using [`ResolveFn`](/api/router/ResolveFn).
```ts
const titleResolver: ResolveFn<string> = (route) => route.queryParams['id'];
const routes: Routes = [
  {
path: 'products',
component: Products,
title: titleResolver,
},
];
```
Route titles can also be set via a service extending the [`TitleStrategy`](/api/router/TitleStrategy) abstract class. By default, Angular uses the [`DefaultTitleStrategy`](/api/router/DefaultTitleStrategy).
### Using TitleStrategy for page titles
For advanced scenarios where you need centralized control over how the document title is composed, implement a `TitleStrategy`.
`TitleStrategy` is a token you can provide to override the default title strategy used by Angular. You can supply a custom `TitleStrategy` to implement conventions such as adding an application suffix, formatting titles from breadcrumbs, or generating titles dynamically from route data.
```ts
import {inject, Injectable} from '@angular/core';
import {Title} from '@angular/platform-browser';
import {TitleStrategy, RouterStateSnapshot} from '@angular/router';
@Injectable()
export class AppTitleStrategy extends TitleStrategy {
private readonly title = inject(Title);
updateTitle(snapshot: RouterStateSnapshot): void {
// PageTitle is equal to the "Title" of a route if it's set
    // If it's not set, it will use the "title" given in index.html
const pageTitle = this.buildTitle(snapshot) || this.title.getTitle();
this.title.setTitle(`MyAwesomeApp - ${pageTitle}`);
}
}
```
To use the custom strategy, provide it with the `TitleStrategy` token at the application level:
```ts
import {provideRouter, TitleStrategy} from '@angular/router';
import {AppTitleStrategy} from './app-title.strategy';
export const appConfig = {
providers: [provideRouter(routes), {provide: TitleStrategy, useClass: AppTitleStrategy}],
};
```
## Route-level providers for dependency injection
Each route has a `providers` property that lets you provide dependencies to that route's content via [dependency injection](/guide/di).
Common scenarios where this can be helpful include applications that have different services based on whether the user is an admin or not.
```ts
export const ROUTES: Route[] = [
{
path: 'admin',
providers: [AdminService, {provide: ADMIN_API_KEY, useValue: '12345'}],
children: [
{path: 'users', component: AdminUsers},
{path: 'teams', component: AdminTeams},
],
},
// ... other application routes that don't
// have access to ADMIN_API_KEY or AdminService.
];
```
In this code sample, the `admin` path contains a protected data property of `ADMIN_API_KEY` that is only available to children within its section. As a result, no other paths will be able to access the data provided via `ADMIN_API_KEY`.
See the [Dependency injection guide](/guide/di) for more information about providers and injection in Angular.
## Associating data with routes
Route data enables you to attach additional information to routes. You are able to configure how components behave based on this data.
There are two ways to work with route data: static data that remains constant, and dynamic data that can change based on runtime conditions.
### Static data
You can associate arbitrary static data with a route via the `data` property in order to centralize things like route-specific metadata (e.g., analytics tracking, permissions, etc.):
```ts
import {Routes} from '@angular/router';
import {Home} from './home';
import {About} from './about';
import {Products} from './products';
const routes: Routes = [
{
path: 'about',
component: About,
data: {analyticsId: '456'},
},
{
path: '',
component: Home,
data: {analyticsId: '123'},
},
];
```
In this code sample, the home and about page are configured with specific `analyticsId` which would then be used in their respective components for page tracking analytics.
You can read this static data by injecting the `ActivatedRoute`. See [Reading route state](/guide/routing/read-route-state) for details.
### Dynamic data with data resolvers
When you need to provide dynamic data to a route, check out the [guide on route data resolvers](/guide/routing/data-resolvers).
## Nested Routes
Nested routes, also known as child routes, are a common technique for managing more complex navigation routes where a component has a sub-view that changes based on the URL.
<img alt="Diagram to illustrate nested routes" src="assets/images/guide/router/nested-routing-diagram.svg">
You can add child routes to any route definition with the `children` property:
```ts
const routes: Routes = [
{
path: 'product/:id',
component: Product,
children: [
{
path: 'info',
component: ProductInfo,
},
{
path: 'reviews',
component: ProductReviews,
},
],
},
];
```
The above example defines a route for a product page that allows a user to change whether the product info or reviews are displayed based on the url.
The `children` property accepts an array of `Route` objects.
To display child routes, the parent component (`Product` in the example above) includes its own `<router-outlet>`.
```angular-html
<!-- Product -->
<article>
<h1>Product {{ id }}</h1>
<router-outlet />
</article>
```
After adding child routes to the configuration and adding a `<router-outlet>` to the component, navigation between URLs that match the child routes updates only the nested outlet.
## Next steps
Learn how to [display the contents of your routes with Outlets](/guide/routing/show-routes-with-outlets). | unknown | github | https://github.com/angular/angular | adev/src/content/guide/routing/define-routes.md |
import {useState} from 'react';
// Renders either the state string or the item list, selected by props.cond.
// `items` is defensively copied with slice() before use.
// NOTE(review): this is a compiler test fixture — its exact code shape
// (the slice creating a locally mutable array whose range reaches into the
// ternary) is what the fixture exercises; do not restructure.
function Component(props) {
  const items = props.items ? props.items.slice() : [];
  const [state] = useState('');
  return props.cond ? (
    <div>{state}</div>
  ) : (
    <div>
      {items.map(item => (
        <div key={item.id}>{item.name}</div>
      ))}
    </div>
  );
}
// Fixture harness: renders Component with the initial params, then replays
// sequentialRenders to cover both ternary branches (cond false → item list,
// cond true → state) and changing item arrays across renders.
export const FIXTURE_ENTRYPOINT = {
  fn: Component,
  params: [{cond: false, items: [{id: 0, name: 'Alice'}]}],
  sequentialRenders: [
    {cond: false, items: [{id: 0, name: 'Alice'}]},
    {
      cond: false,
      items: [
        {id: 0, name: 'Alice'},
        {id: 1, name: 'Bob'},
      ],
    },
    {
      cond: true,
      items: [
        {id: 0, name: 'Alice'},
        {id: 1, name: 'Bob'},
      ],
    },
    {
      cond: false,
      items: [
        {id: 1, name: 'Bob'},
        {id: 2, name: 'Claire'},
      ],
    },
  ],
};
# Copyright (c) 2018 Cisco and/or its affiliates.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = """
---
author: Ansible Networking Team
httpapi: restconf
short_description: HttpApi Plugin for devices supporting Restconf API
description:
- This HttpApi plugin provides methods to connect to Restconf API
endpoints.
version_added: "2.8"
options:
root_path:
type: str
description:
- Specifies the location of the Restconf root.
default: '/restconf'
vars:
- name: ansible_httpapi_restconf_root
"""
import json
from ansible.module_utils._text import to_text
from ansible.module_utils.network.common.utils import to_list
from ansible.module_utils.connection import ConnectionError
from ansible.module_utils.six.moves.urllib.error import HTTPError
from ansible.plugins.httpapi import HttpApiBase
CONTENT_TYPE = 'application/yang-data+json'
class HttpApi(HttpApiBase):
    """HttpApi implementation that speaks to RESTCONF endpoints."""

    def send_request(self, data, **message_kwargs):
        """Serialize ``data`` as JSON, send it to the RESTCONF resource and
        return the decoded reply.

        Keyword arguments recognised: ``path`` (resource below the RESTCONF
        root), ``method`` (HTTP verb), ``content_type`` and ``accept``
        (override the yang-data+json defaults).
        """
        payload = json.dumps(data) if data else data
        root = self.get_option('root_path').rstrip('/')
        resource = message_kwargs.get('path', '').lstrip('/')
        url = '%s/%s' % (root, resource)
        headers = {
            'Content-Type': message_kwargs.get('content_type') or CONTENT_TYPE,
            'Accept': message_kwargs.get('accept') or CONTENT_TYPE,
        }
        response, response_data = self.connection.send(
            url, payload, headers=headers, method=message_kwargs.get('method'))
        return handle_response(response, response_data)
def handle_response(response, response_data):
    """Decode a RESTCONF reply, raising ConnectionError on HTTP errors.

    :param response: the response object; an ``HTTPError`` instance when the
        server returned an error status.
    :param response_data: a file-like object holding the response body.
    :returns: the body parsed as JSON, or the raw body when it is not valid
        JSON.
    :raises ConnectionError: when ``response`` is an ``HTTPError``; the
        message is taken from the RESTCONF ``errors`` structure if present.
    """
    # Read the stream exactly once: a second read() after a failed
    # json.loads() would return an empty string because the underlying
    # buffer is already consumed, silently losing the body.
    raw = response_data.read()
    try:
        response_data = json.loads(raw)
    except ValueError:
        response_data = raw

    if isinstance(response, HTTPError):
        if response_data:
            if 'errors' in response_data:
                errors = response_data['errors']['error']
                error_text = '\n'.join((error['error-message'] for error in errors))
            else:
                error_text = response_data
            raise ConnectionError(error_text, code=response.code)
        raise ConnectionError(to_text(response), code=response.code)

    return response_data
{
"kind": "Dashboard",
"apiVersion": "dashboard.grafana.app/v2beta1",
"metadata": {
"name": "v0alpha1.annotation-filtering.v42"
},
"spec": {
"annotations": [
{
"kind": "AnnotationQuery",
"spec": {
"query": {
"kind": "DataQuery",
"group": "grafana",
"version": "v0",
"datasource": {
"name": "-- Grafana --"
},
"spec": {}
},
"enable": true,
"hide": true,
"iconColor": "rgba(0, 211, 255, 1)",
"name": "Annotations \u0026 Alerts",
"builtIn": true,
"legacyOptions": {
"type": "dashboard"
}
}
},
{
"kind": "AnnotationQuery",
"spec": {
"query": {
"kind": "DataQuery",
"group": "grafana",
"version": "v0",
"datasource": {
"name": "-- Grafana --"
},
"spec": {
"limit": 100,
"matchAny": false,
"tags": [],
"type": "dashboard"
}
},
"enable": true,
"hide": true,
"iconColor": "rgba(0, 211, 255, 1)",
"name": "Annotations \u0026 Alerts",
"legacyOptions": {
"type": "dashboard"
}
}
},
{
"kind": "AnnotationQuery",
"spec": {
"query": {
"kind": "DataQuery",
"group": "grafana-testdata-datasource",
"version": "v0",
"spec": {
"lines": 4,
"refId": "Anno",
"scenarioId": "annotations"
}
},
"enable": true,
"hide": false,
"iconColor": "red",
"name": "Red, only panel 1",
"filter": {
"ids": [
1
]
}
}
},
{
"kind": "AnnotationQuery",
"spec": {
"query": {
"kind": "DataQuery",
"group": "grafana-testdata-datasource",
"version": "v0",
"spec": {
"lines": 5,
"refId": "Anno",
"scenarioId": "annotations"
}
},
"enable": true,
"hide": false,
"iconColor": "yellow",
"name": "Yellow - all except 1",
"filter": {
"exclude": true,
"ids": [
1
]
}
}
},
{
"kind": "AnnotationQuery",
"spec": {
"query": {
"kind": "DataQuery",
"group": "grafana-testdata-datasource",
"version": "v0",
"spec": {
"lines": 6,
"refId": "Anno",
"scenarioId": "annotations"
}
},
"enable": true,
"hide": false,
"iconColor": "dark-purple",
"name": "Purple only panel 3+4",
"filter": {
"ids": [
3,
4
]
}
}
}
],
"cursorSync": "Off",
"editable": true,
"elements": {
"panel-1": {
"kind": "Panel",
"spec": {
"id": 1,
"title": "Panel one",
"description": "",
"links": [],
"data": {
"kind": "QueryGroup",
"spec": {
"queries": [],
"transformations": [],
"queryOptions": {}
}
},
"vizConfig": {
"kind": "VizConfig",
"group": "timeseries",
"version": "",
"spec": {
"options": {
"legend": {
"calcs": [],
"displayMode": "list",
"placement": "bottom",
"showLegend": true
},
"tooltip": {
"mode": "single",
"sort": "none"
}
},
"fieldConfig": {
"defaults": {
"thresholds": {
"mode": "absolute",
"steps": [
{
"value": null,
"color": "green"
},
{
"value": 80,
"color": "red"
}
]
},
"color": {
"mode": "palette-classic"
},
"custom": {
"axisCenteredZero": false,
"axisColorMode": "text",
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
"drawStyle": "line",
"fillOpacity": 0,
"gradientMode": "none",
"hideFrom": {
"legend": false,
"tooltip": false,
"viz": false
},
"lineInterpolation": "linear",
"lineWidth": 1,
"pointSize": 5,
"scaleDistribution": {
"type": "linear"
},
"showPoints": "auto",
"spanNulls": false,
"stacking": {
"group": "A",
"mode": "none"
},
"thresholdsStyle": {
"mode": "off"
}
}
},
"overrides": []
}
}
}
}
},
"panel-2": {
"kind": "Panel",
"spec": {
"id": 2,
"title": "Panel two",
"description": "",
"links": [],
"data": {
"kind": "QueryGroup",
"spec": {
"queries": [],
"transformations": [],
"queryOptions": {}
}
},
"vizConfig": {
"kind": "VizConfig",
"group": "timeseries",
"version": "",
"spec": {
"options": {
"legend": {
"calcs": [],
"displayMode": "list",
"placement": "bottom",
"showLegend": true
},
"tooltip": {
"mode": "single",
"sort": "none"
}
},
"fieldConfig": {
"defaults": {
"thresholds": {
"mode": "absolute",
"steps": [
{
"value": null,
"color": "green"
},
{
"value": 80,
"color": "red"
}
]
},
"color": {
"mode": "palette-classic"
},
"custom": {
"axisCenteredZero": false,
"axisColorMode": "text",
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
"drawStyle": "line",
"fillOpacity": 0,
"gradientMode": "none",
"hideFrom": {
"legend": false,
"tooltip": false,
"viz": false
},
"lineInterpolation": "linear",
"lineWidth": 1,
"pointSize": 5,
"scaleDistribution": {
"type": "linear"
},
"showPoints": "auto",
"spanNulls": false,
"stacking": {
"group": "A",
"mode": "none"
},
"thresholdsStyle": {
"mode": "off"
}
}
},
"overrides": []
}
}
}
}
},
"panel-3": {
"kind": "Panel",
"spec": {
"id": 3,
"title": "Panel three",
"description": "",
"links": [],
"data": {
"kind": "QueryGroup",
"spec": {
"queries": [],
"transformations": [],
"queryOptions": {}
}
},
"vizConfig": {
"kind": "VizConfig",
"group": "timeseries",
"version": "",
"spec": {
"options": {
"legend": {
"calcs": [],
"displayMode": "list",
"placement": "bottom",
"showLegend": true
},
"tooltip": {
"mode": "single",
"sort": "none"
}
},
"fieldConfig": {
"defaults": {
"thresholds": {
"mode": "absolute",
"steps": [
{
"value": null,
"color": "green"
},
{
"value": 80,
"color": "red"
}
]
},
"color": {
"mode": "palette-classic"
},
"custom": {
"axisCenteredZero": false,
"axisColorMode": "text",
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
"drawStyle": "line",
"fillOpacity": 0,
"gradientMode": "none",
"hideFrom": {
"legend": false,
"tooltip": false,
"viz": false
},
"lineInterpolation": "linear",
"lineWidth": 1,
"pointSize": 5,
"scaleDistribution": {
"type": "linear"
},
"showPoints": "auto",
"spanNulls": false,
"stacking": {
"group": "A",
"mode": "none"
},
"thresholdsStyle": {
"mode": "off"
}
}
},
"overrides": []
}
}
}
}
},
"panel-4": {
"kind": "Panel",
"spec": {
"id": 4,
"title": "Panel four",
"description": "",
"links": [],
"data": {
"kind": "QueryGroup",
"spec": {
"queries": [],
"transformations": [],
"queryOptions": {}
}
},
"vizConfig": {
"kind": "VizConfig",
"group": "timeseries",
"version": "",
"spec": {
"options": {
"legend": {
"calcs": [],
"displayMode": "list",
"placement": "bottom",
"showLegend": true
},
"tooltip": {
"mode": "single",
"sort": "none"
}
},
"fieldConfig": {
"defaults": {
"thresholds": {
"mode": "absolute",
"steps": [
{
"value": null,
"color": "green"
},
{
"value": 80,
"color": "red"
}
]
},
"color": {
"mode": "palette-classic"
},
"custom": {
"axisCenteredZero": false,
"axisColorMode": "text",
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
"drawStyle": "line",
"fillOpacity": 0,
"gradientMode": "none",
"hideFrom": {
"legend": false,
"tooltip": false,
"viz": false
},
"lineInterpolation": "linear",
"lineWidth": 1,
"pointSize": 5,
"scaleDistribution": {
"type": "linear"
},
"showPoints": "auto",
"spanNulls": false,
"stacking": {
"group": "A",
"mode": "none"
},
"thresholdsStyle": {
"mode": "off"
}
}
},
"overrides": []
}
}
}
}
}
},
"layout": {
"kind": "GridLayout",
"spec": {
"items": [
{
"kind": "GridLayoutItem",
"spec": {
"x": 0,
"y": 0,
"width": 12,
"height": 8,
"element": {
"kind": "ElementReference",
"name": "panel-1"
}
}
},
{
"kind": "GridLayoutItem",
"spec": {
"x": 12,
"y": 0,
"width": 12,
"height": 8,
"element": {
"kind": "ElementReference",
"name": "panel-2"
}
}
},
{
"kind": "GridLayoutItem",
"spec": {
"x": 0,
"y": 8,
"width": 12,
"height": 8,
"element": {
"kind": "ElementReference",
"name": "panel-3"
}
}
},
{
"kind": "GridLayoutItem",
"spec": {
"x": 12,
"y": 8,
"width": 12,
"height": 8,
"element": {
"kind": "ElementReference",
"name": "panel-4"
}
}
}
]
}
},
"links": [],
"liveNow": false,
"preload": false,
"tags": [
"gdev",
"annotations"
],
"timeSettings": {
"timezone": "",
"from": "now-30m",
"to": "now",
"autoRefresh": "",
"autoRefreshIntervals": [
"5s",
"10s",
"30s",
"1m",
"5m",
"15m",
"30m",
"1h",
"2h",
"1d"
],
"hideTimepicker": false,
"fiscalYearStartMonth": 0
},
"title": "Annotation filtering",
"variables": []
},
"status": {
"conversion": {
"failed": false,
"storedVersion": "v0alpha1"
}
}
} | json | github | https://github.com/grafana/grafana | apps/dashboard/pkg/migration/conversion/testdata/output/migrated_dev_dashboards/annotations/v0alpha1.annotation-filtering.v42.v2beta1.json |
#!/usr/bin/python
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['stableinterface'],
'supported_by': 'core'}
DOCUMENTATION = '''
---
module: ec2_vol
short_description: create and attach a volume, return volume id and device map
description:
- creates an EBS volume and optionally attaches it to an instance.
If both an instance ID and a device name is given and the instance has a device at the device name, then no volume is created and no attachment is made.
This module has a dependency on python-boto.
version_added: "1.1"
options:
instance:
description:
- instance ID if you wish to attach the volume. Since 1.9 you can set to None to detach.
required: false
default: null
name:
description:
- volume Name tag if you wish to attach an existing volume (requires instance)
required: false
default: null
version_added: "1.6"
id:
description:
- volume id if you wish to attach an existing volume (requires instance) or remove an existing volume
required: false
default: null
version_added: "1.6"
volume_size:
description:
- size of volume (in GB) to create.
required: false
default: null
volume_type:
description:
- Type of EBS volume; standard (magnetic), gp2 (SSD), io1 (Provisioned IOPS), st1 (Throughput Optimized HDD), sc1 (Cold HDD).
"Standard" is the old EBS default and continues to remain the Ansible default for backwards compatibility.
required: false
default: standard
version_added: "1.9"
iops:
description:
- the provisioned IOPs you want to associate with this volume (integer).
required: false
default: 100
version_added: "1.3"
encrypted:
description:
- Enable encryption at rest for this volume.
default: false
version_added: "1.8"
kms_key_id:
description:
- Specify the id of the KMS key to use.
default: null
version_added: "2.3"
device_name:
description:
- device id to override device mapping. Assumes /dev/sdf for Linux/UNIX and /dev/xvdf for Windows.
required: false
default: null
delete_on_termination:
description:
- When set to "yes", the volume will be deleted upon instance termination.
required: false
default: "no"
choices: ["yes", "no"]
version_added: "2.1"
zone:
description:
- zone in which to create the volume, if unset uses the zone the instance is in (if set)
required: false
default: null
aliases: ['aws_zone', 'ec2_zone']
snapshot:
description:
- snapshot ID on which to base the volume
required: false
default: null
version_added: "1.5"
validate_certs:
description:
- When set to "no", SSL certificates will not be validated for boto versions >= 2.6.0.
required: false
default: "yes"
choices: ["yes", "no"]
version_added: "1.5"
state:
description:
- whether to ensure the volume is present or absent, or to list existing volumes (The C(list) option was added in version 1.8).
required: false
default: present
choices: ['absent', 'present', 'list']
version_added: "1.6"
tags:
description:
- tag:value pairs to add to the volume after creation
required: false
default: {}
version_added: "2.3"
author: "Lester Wade (@lwade)"
extends_documentation_fragment:
- aws
- ec2
'''
EXAMPLES = '''
# Simple attachment action
- ec2_vol:
instance: XXXXXX
volume_size: 5
device_name: sdd
# Example using custom iops params
- ec2_vol:
instance: XXXXXX
volume_size: 5
iops: 100
device_name: sdd
# Example using snapshot id
- ec2_vol:
instance: XXXXXX
snapshot: "{{ snapshot }}"
# Playbook example combined with instance launch
- ec2:
keypair: "{{ keypair }}"
image: "{{ image }}"
wait: yes
count: 3
register: ec2
- ec2_vol:
instance: "{{ item.id }}"
volume_size: 5
with_items: "{{ ec2.instances }}"
register: ec2_vol
# Example: Launch an instance and then add a volume if not already attached
# * Volume will be created with the given name if not already created.
# * Nothing will happen if the volume is already attached.
# * Requires Ansible 2.0
- ec2:
keypair: "{{ keypair }}"
image: "{{ image }}"
zone: YYYYYY
id: my_instance
wait: yes
count: 1
register: ec2
- ec2_vol:
instance: "{{ item.id }}"
name: my_existing_volume_Name_tag
device_name: /dev/xvdf
with_items: "{{ ec2.instances }}"
register: ec2_vol
# Remove a volume
- ec2_vol:
id: vol-XXXXXXXX
state: absent
# Detach a volume (since 1.9)
- ec2_vol:
id: vol-XXXXXXXX
instance: None
# List volumes for an instance
- ec2_vol:
instance: i-XXXXXX
state: list
# Create new volume using SSD storage
- ec2_vol:
instance: XXXXXX
volume_size: 50
volume_type: gp2
device_name: /dev/xvdf
# Attach an existing volume to instance. The volume will be deleted upon instance termination.
- ec2_vol:
instance: XXXXXX
id: XXXXXX
device_name: /dev/sdf
delete_on_termination: yes
'''
RETURN = '''
device:
description: device name of attached volume
returned: when success
type: string
sample: "/def/sdf"
volume_id:
description: the id of volume
returned: when success
type: string
sample: "vol-35b333d9"
volume_type:
description: the volume type
returned: when success
type: string
sample: "standard"
volume:
description: a dictionary containing detailed attributes of the volume
returned: when success
type: string
sample: {
"attachment_set": {
"attach_time": "2015-10-23T00:22:29.000Z",
"deleteOnTermination": "false",
"device": "/dev/sdf",
"instance_id": "i-8356263c",
"status": "attached"
},
"create_time": "2015-10-21T14:36:08.870Z",
"encrypted": false,
"id": "vol-35b333d9",
"iops": null,
"size": 1,
"snapshot_id": "",
"status": "in-use",
"tags": {
"env": "dev"
},
"type": "standard",
"zone": "us-east-1b"
}
'''
import time
from distutils.version import LooseVersion
try:
import boto
import boto.ec2
import boto.exception
from boto.exception import BotoServerError
from boto.ec2.blockdevicemapping import BlockDeviceType, BlockDeviceMapping
except ImportError:
pass # Taken care of by ec2.HAS_BOTO
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ec2 import (HAS_BOTO, AnsibleAWSError, connect_to_aws, ec2_argument_spec,
get_aws_connection_info)
def get_volume(module, ec2):
    """Look up a single EBS volume by id and/or Name tag.

    :param module: the AnsibleModule instance (supplies name/id/zone params).
    :param ec2: a boto EC2 connection.
    :returns: the matching boto Volume, or None when neither ``name`` nor
        ``id`` was supplied.  Fails the module when an ``id`` cannot be
        found or a ``name`` matches more than one volume.
    """
    name = module.params.get('name')
    id = module.params.get('id')
    zone = module.params.get('zone')
    filters = {}
    volume_ids = None

    # If no name or id supplied, just try volume creation based on module parameters
    if id is None and name is None:
        return None

    if zone:
        filters['availability_zone'] = zone
    if name:
        # Add to the existing filters rather than replacing the dict, so a
        # zone filter set above is not silently discarded.
        filters['tag:Name'] = name
    if id:
        volume_ids = [id]
    try:
        vols = ec2.get_all_volumes(volume_ids=volume_ids, filters=filters)
    except boto.exception.BotoServerError as e:
        module.fail_json(msg="%s: %s" % (e.error_code, e.error_message))

    if not vols:
        if id:
            msg = "Could not find the volume with id: %s" % id
            if name:
                msg += (" and name: %s" % name)
            module.fail_json(msg=msg)
        else:
            return None

    if len(vols) > 1:
        module.fail_json(msg="Found more than one volume in zone (if specified) with name: %s" % name)
    return vols[0]
def get_volumes(module, ec2):
    """Return all EBS volumes, optionally restricted to one instance.

    When the module's ``instance`` parameter is set, only volumes attached
    to that instance are returned; otherwise every volume is listed.
    """
    instance = module.params.get('instance')

    filters = None
    if instance:
        filters = {'attachment.instance-id': instance}
    try:
        return ec2.get_all_volumes(filters=filters)
    except boto.exception.BotoServerError as e:
        module.fail_json(msg="%s: %s" % (e.error_code, e.error_message))
def delete_volume(module, ec2):
    """Delete the volume named by the module's ``id`` parameter.

    Exits the module with changed=True on deletion, with changed=False when
    the volume no longer exists, and fails on any other EC2 error.
    """
    volume_id = module.params['id']
    try:
        ec2.delete_volume(volume_id)
    except boto.exception.EC2ResponseError as ec2_error:
        if ec2_error.code == 'InvalidVolume.NotFound':
            module.exit_json(changed=False)
        module.fail_json(msg=ec2_error.message)
    else:
        module.exit_json(changed=True)
def boto_supports_volume_encryption():
    """
    Check if Boto library supports encryption of EBS volumes (added in 2.29.0)

    Returns:
        True if the installed boto version is 2.29.0 or newer, else False
    """
    return hasattr(boto, 'Version') and LooseVersion(boto.Version) >= LooseVersion('2.29.0')
def boto_supports_kms_key_id():
    """
    Check if Boto library supports kms_key_ids (added in 2.39.0)

    Returns:
        True if the installed boto version is 2.39.0 or newer, else False
    """
    return hasattr(boto, 'Version') and LooseVersion(boto.Version) >= LooseVersion('2.39.0')
def create_volume(module, ec2, zone):
    """Create an EBS volume (or reuse an existing one) per module params.

    Looks up an existing volume first; only when none is found does it
    create a new one, wait for it to become available, and apply tags.

    :param module: the AnsibleModule instance.
    :param ec2: a boto EC2 connection.
    :param zone: availability zone to create the volume in.
    :returns: (volume, changed) tuple; changed is True only when a new
        volume was actually created.
    """
    changed = False
    name = module.params.get('name')
    iops = module.params.get('iops')
    encrypted = module.params.get('encrypted')
    kms_key_id = module.params.get('kms_key_id')
    volume_size = module.params.get('volume_size')
    volume_type = module.params.get('volume_type')
    snapshot = module.params.get('snapshot')
    tags = module.params.get('tags')

    # If custom iops is defined we use volume_type "io1" rather than the default of "standard"
    if iops:
        volume_type = 'io1'

    volume = get_volume(module, ec2)
    if volume is None:
        try:
            if boto_supports_volume_encryption():
                # Newer boto accepts the encryption flag (and optionally a
                # KMS key id) as extra positional arguments.
                if kms_key_id is not None:
                    volume = ec2.create_volume(volume_size, zone, snapshot, volume_type, iops, encrypted, kms_key_id)
                else:
                    volume = ec2.create_volume(volume_size, zone, snapshot, volume_type, iops, encrypted)
                changed = True
            else:
                volume = ec2.create_volume(volume_size, zone, snapshot, volume_type, iops)
                changed = True

            # Block until the volume leaves the 'creating' state; tags can
            # only be applied once it exists.
            while volume.status != 'available':
                time.sleep(3)
                volume.update()

            # NOTE(review): this mutates the caller-supplied tags dict in place.
            if name:
                tags["Name"] = name
            if tags:
                ec2.create_tags([volume.id], tags)
        except boto.exception.BotoServerError as e:
            module.fail_json(msg = "%s: %s" % (e.error_code, e.error_message))

    return volume, changed
def attach_volume(module, ec2, volume, instance):
    """Attach ``volume`` to ``instance``, choosing a device name if needed.

    If the volume is already attached to this instance, only the
    delete-on-termination attribute is reconciled; attachment to a
    different instance is a hard failure.

    :param module: the AnsibleModule instance.
    :param ec2: a boto EC2 connection.
    :param volume: the boto Volume to attach.
    :param instance: the boto Instance to attach it to.
    :returns: (volume, changed) tuple.
    """
    device_name = module.params.get('device_name')
    changed = False

    # If device_name isn't set, make a choice based on best practices here:
    # http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/block-device-mapping-concepts.html

    # In future this needs to be more dynamic but combining block device mapping best practices
    # (bounds for devices, as above) with instance.block_device_mapping data would be tricky. For me ;)

    # Use password data attribute to tell whether the instance is Windows or Linux
    if device_name is None:
        try:
            if not ec2.get_password_data(instance.id):
                device_name = '/dev/sdf'
            else:
                device_name = '/dev/xvdf'
        except boto.exception.BotoServerError as e:
            module.fail_json(msg="%s: %s" % (e.error_code, e.error_message))

    if volume.attachment_state() is not None:
        adata = volume.attach_data
        if adata.instance_id != instance.id:
            module.fail_json(msg="Volume %s is already attached to another instance: %s"
                             % (volume.id, adata.instance_id))
        else:
            # Volume is already attached to the right instance; just make
            # sure delete-on-termination matches the requested value.
            changed = modify_dot_attribute(module, ec2, instance, device_name)
    else:
        try:
            volume.attach(instance.id, device_name)
            # Poll until EC2 reports the attachment as complete.
            while volume.attachment_state() != 'attached':
                time.sleep(3)
                volume.update()
            changed = True
        except boto.exception.BotoServerError as e:
            module.fail_json(msg="%s: %s" % (e.error_code, e.error_message))

        modify_dot_attribute(module, ec2, instance, device_name)

    return volume, changed
def modify_dot_attribute(module, ec2, instance, device_name):
    """Reconcile the delete-on-termination attribute of one attachment.

    Compares the instance's current block-device mapping for
    ``device_name`` against the module's ``delete_on_termination``
    parameter and updates it via modify_instance_attribute when they
    differ, polling until the change is visible.

    :returns: True when the attribute was changed, False otherwise.
    """
    delete_on_termination = module.params.get('delete_on_termination')
    changed = False

    try:
        # Refresh so block_device_mapping reflects the current attachment.
        instance.update()
        dot = instance.block_device_mapping[device_name].delete_on_termination
    except boto.exception.BotoServerError as e:
        module.fail_json(msg = "%s: %s" % (e.error_code, e.error_message))

    if delete_on_termination != dot:
        try:
            bdt = BlockDeviceType(delete_on_termination=delete_on_termination)
            bdm = BlockDeviceMapping()
            bdm[device_name] = bdt

            ec2.modify_instance_attribute(instance_id=instance.id, attribute='blockDeviceMapping', value=bdm)

            # Poll until the new attribute value is reflected by EC2.
            while instance.block_device_mapping[device_name].delete_on_termination != delete_on_termination:
                time.sleep(3)
                instance.update()
            changed = True
        except boto.exception.BotoServerError as e:
            module.fail_json(msg = "%s: %s" % (e.error_code, e.error_message))

    return changed
def detach_volume(module, ec2, volume):
    """Detach ``volume`` from whatever instance it is attached to.

    Blocks until EC2 reports the volume as detached.  ``module`` and
    ``ec2`` are accepted for signature consistency with the other volume
    helpers but are not used here.

    :returns: (volume, changed) tuple; changed is True when a detach
        actually occurred.
    """
    changed = False

    if volume.attachment_state() is not None:
        volume.detach()
        # Poll until the attachment is fully gone.
        while volume.attachment_state() is not None:
            time.sleep(3)
            volume.update()
        changed = True

    return volume, changed
def get_volume_info(volume, state):
    """Serialize a boto Volume into a plain dict for module output.

    :param volume: a boto Volume object.
    :param state: the module ``state`` value; for anything other than
        'list' the volume is refreshed from EC2 before serialization.
    :returns: dict of volume attributes plus an 'attachment_set' sub-dict.
    """
    # If we're just listing volumes then do nothing, else get the latest update for the volume
    if state != 'list':
        volume.update()

    attachment = volume.attach_data
    volume_info = {
        'create_time': volume.create_time,
        'encrypted': volume.encrypted,
        'id': volume.id,
        'iops': volume.iops,
        'size': volume.size,
        'snapshot_id': volume.snapshot_id,
        'status': volume.status,
        'type': volume.type,
        'zone': volume.zone,
        'attachment_set': {
            'attach_time': attachment.attach_time,
            'device': attachment.device,
            'instance_id': attachment.instance_id,
            'status': attachment.status
        },
        'tags': volume.tags
    }

    # deleteOnTermination is only present on the attach_data in some cases,
    # so copy it conditionally.
    if hasattr(attachment, 'deleteOnTermination'):
        volume_info['attachment_set']['deleteOnTermination'] = attachment.deleteOnTermination

    return volume_info
def main():
    """Module entry point: create/attach, detach, delete, or list EBS volumes."""
    argument_spec = ec2_argument_spec()
    argument_spec.update(dict(
        instance = dict(),
        id = dict(),
        name = dict(),
        volume_size = dict(),
        volume_type = dict(choices=['standard', 'gp2', 'io1', 'st1', 'sc1'], default='standard'),
        iops = dict(),
        encrypted = dict(type='bool', default=False),
        kms_key_id = dict(),
        device_name = dict(),
        delete_on_termination = dict(type='bool', default=False),
        zone = dict(aliases=['availability_zone', 'aws_zone', 'ec2_zone']),
        snapshot = dict(),
        state = dict(choices=['absent', 'present', 'list'], default='present'),
        tags = dict(type='dict', default={})
    )
    )
    module = AnsibleModule(argument_spec=argument_spec)

    if not HAS_BOTO:
        module.fail_json(msg='boto required for this module')

    id = module.params.get('id')
    name = module.params.get('name')
    instance = module.params.get('instance')
    volume_size = module.params.get('volume_size')
    encrypted = module.params.get('encrypted')
    kms_key_id = module.params.get('kms_key_id')
    device_name = module.params.get('device_name')
    zone = module.params.get('zone')
    snapshot = module.params.get('snapshot')
    state = module.params.get('state')
    tags = module.params.get('tags')

    # Ensure we have the zone or can get the zone
    if instance is None and zone is None and state == 'present':
        module.fail_json(msg="You must specify either instance or zone")

    # Set volume detach flag: the documented way to detach is to pass
    # instance=None (arrives as the string 'None' or empty).
    if instance == 'None' or instance == '':
        instance = None
        detach_vol_flag = True
    else:
        detach_vol_flag = False

    # Set changed flag
    changed = False

    region, ec2_url, aws_connect_params = get_aws_connection_info(module)

    if region:
        try:
            ec2 = connect_to_aws(boto.ec2, region, **aws_connect_params)
        except (boto.exception.NoAuthHandlerFound, AnsibleAWSError) as e:
            module.fail_json(msg=str(e))
    else:
        module.fail_json(msg="region must be specified")

    if state == 'list':
        returned_volumes = []
        vols = get_volumes(module, ec2)

        for v in vols:
            # NOTE(review): 'attachment' is assigned but never used here.
            attachment = v.attach_data
            returned_volumes.append(get_volume_info(v, state))

        # exit_json terminates the module run; nothing below executes for 'list'.
        module.exit_json(changed=False, volumes=returned_volumes)

    if encrypted and not boto_supports_volume_encryption():
        module.fail_json(msg="You must use boto >= v2.29.0 to use encrypted volumes")

    if kms_key_id is not None and not boto_supports_kms_key_id():
        module.fail_json(msg="You must use boto >= v2.39.0 to use kms_key_id")

    # Here we need to get the zone info for the instance. This covers situation where
    # instance is specified but zone isn't.
    # Useful for playbooks chaining instance launch with volume create + attach and where the
    # zone doesn't matter to the user.
    inst = None
    if instance:
        try:
            reservation = ec2.get_all_instances(instance_ids=instance)
        except BotoServerError as e:
            module.fail_json(msg=e.message)
        inst = reservation[0].instances[0]
        zone = inst.placement

        # Check if there is a volume already mounted there.
        if device_name:
            if device_name in inst.block_device_mapping:
                module.exit_json(msg="Volume mapping for %s already exists on instance %s" % (device_name, instance),
                                 volume_id=inst.block_device_mapping[device_name].volume_id,
                                 device=device_name,
                                 changed=False)

    # Delaying the checks until after the instance check allows us to get volume ids for existing volumes
    # without needing to pass an unused volume_size
    if not volume_size and not (id or name or snapshot):
        module.fail_json(msg="You must specify volume_size or identify an existing volume by id, name, or snapshot")

    if volume_size and id:
        module.fail_json(msg="Cannot specify volume_size together with id")

    if state == 'present':
        volume, changed = create_volume(module, ec2, zone)
        if detach_vol_flag:
            volume, changed = detach_volume(module, ec2, volume)
        elif inst is not None:
            volume, changed = attach_volume(module, ec2, volume, inst)

        # Add device, volume_id and volume_type parameters separately to maintain backward compatibility
        volume_info = get_volume_info(volume, state)

        # deleteOnTermination is not correctly reflected on attachment:
        # retry up to 8 times (5s apart) for the flag to show up as 'true'.
        if module.params.get('delete_on_termination'):
            for attempt in range(0, 8):
                if volume_info['attachment_set'].get('deleteOnTermination') == 'true':
                    break
                time.sleep(5)
                volume = ec2.get_all_volumes(volume_ids=volume.id)[0]
                volume_info = get_volume_info(volume, state)
        module.exit_json(changed=changed, volume=volume_info, device=volume_info['attachment_set']['device'],
                         volume_id=volume_info['id'], volume_type=volume_info['type'])
    elif state == 'absent':
        delete_volume(module, ec2)
if __name__ == '__main__':
main() | unknown | codeparrot/codeparrot-clean | ||
"""Ensure emitted events contain the fields legacy processors expect to find."""
from mock import sentinel
from django.test.utils import override_settings
from openedx.core.lib.tests.assertions.events import assert_events_equal
from track.tests import EventTrackingTestCase, FROZEN_TIME
LEGACY_SHIM_PROCESSOR = [
{
'ENGINE': 'track.shim.LegacyFieldMappingProcessor'
}
]
GOOGLE_ANALYTICS_PROCESSOR = [
{
'ENGINE': 'track.shim.GoogleAnalyticsProcessor'
}
]
@override_settings(
    EVENT_TRACKING_PROCESSORS=LEGACY_SHIM_PROCESSOR,
)
class LegacyFieldMappingProcessorTestCase(EventTrackingTestCase):
    """Ensure emitted events contain the fields legacy processors expect to find."""

    def test_event_field_mapping(self):
        """Context fields should be lifted into their legacy top-level slots."""
        data = {sentinel.key: sentinel.value}

        context = {
            'accept_language': sentinel.accept_language,
            'referer': sentinel.referer,
            'username': sentinel.username,
            'session': sentinel.session,
            'ip': sentinel.ip,
            'host': sentinel.host,
            'agent': sentinel.agent,
            'path': sentinel.path,
            'user_id': sentinel.user_id,
            'course_id': sentinel.course_id,
            'org_id': sentinel.org_id,
            'client_id': sentinel.client_id,
        }
        with self.tracker.context('test', context):
            self.tracker.emit(sentinel.name, data)

        emitted_event = self.get_event()

        # Only user_id/course_id/org_id/path remain inside 'context'; the
        # rest are promoted to top-level legacy fields.
        expected_event = {
            'accept_language': sentinel.accept_language,
            'referer': sentinel.referer,
            'event_type': sentinel.name,
            'name': sentinel.name,
            'context': {
                'user_id': sentinel.user_id,
                'course_id': sentinel.course_id,
                'org_id': sentinel.org_id,
                'path': sentinel.path,
            },
            'event': data,
            'username': sentinel.username,
            'event_source': 'server',
            'time': FROZEN_TIME,
            'agent': sentinel.agent,
            'host': sentinel.host,
            'ip': sentinel.ip,
            'page': None,
            'session': sentinel.session,
        }
        assert_events_equal(expected_event, emitted_event)

    def test_missing_fields(self):
        """Absent context values should map to empty-string (or None) defaults."""
        self.tracker.emit(sentinel.name)

        emitted_event = self.get_event()

        expected_event = {
            'accept_language': '',
            'referer': '',
            'event_type': sentinel.name,
            'name': sentinel.name,
            'context': {},
            'event': {},
            'username': '',
            'event_source': 'server',
            'time': FROZEN_TIME,
            'agent': '',
            'host': '',
            'ip': '',
            'page': None,
            'session': '',
        }
        assert_events_equal(expected_event, emitted_event)
@override_settings(
    EVENT_TRACKING_PROCESSORS=GOOGLE_ANALYTICS_PROCESSOR,
)
class GoogleAnalyticsProcessorTestCase(EventTrackingTestCase):
    """Ensure emitted events contain the fields necessary for Google Analytics."""

    def test_event_fields(self):
        """ Test that course_id is added as the label if present, and nonInteraction is set. """
        data = {sentinel.key: sentinel.value}

        context = {
            'path': sentinel.path,
            'user_id': sentinel.user_id,
            'course_id': sentinel.course_id,
            'org_id': sentinel.org_id,
            'client_id': sentinel.client_id,
        }
        with self.tracker.context('test', context):
            self.tracker.emit(sentinel.name, data)

        emitted_event = self.get_event()

        # The processor copies course_id into 'label' and always flags the
        # event as non-interactive (nonInteraction == 1).
        expected_event = {
            'context': context,
            'data': data,
            'label': sentinel.course_id,
            'name': sentinel.name,
            'nonInteraction': 1,
            'timestamp': FROZEN_TIME,
        }
        assert_events_equal(expected_event, emitted_event)

    def test_no_course_id(self):
        """ Test that a label is not added if course_id is not specified, but nonInteraction is still set. """
        data = {sentinel.key: sentinel.value}

        context = {
            'path': sentinel.path,
            'user_id': sentinel.user_id,
            'client_id': sentinel.client_id,
        }
        with self.tracker.context('test', context):
            self.tracker.emit(sentinel.name, data)

        emitted_event = self.get_event()

        # No 'label' key expected here because the context has no course_id.
        expected_event = {
            'context': context,
            'data': data,
            'name': sentinel.name,
            'nonInteraction': 1,
            'timestamp': FROZEN_TIME,
        }
        assert_events_equal(expected_event, emitted_event)
use crate::io::AsyncWrite;
use bytes::Buf;
use pin_project_lite::pin_project;
use std::future::Future;
use std::io::{self, IoSlice};
use std::marker::PhantomPinned;
use std::pin::Pin;
use std::task::{ready, Context, Poll};
pin_project! {
    /// A future to write some of the buffer to an `AsyncWrite`.
    ///
    /// Created by the [`write_buf`] helper; resolves to the number of
    /// bytes written and advances `buf` past them.
    #[derive(Debug)]
    #[must_use = "futures do nothing unless you `.await` or poll them"]
    pub struct WriteBuf<'a, W, B> {
        writer: &'a mut W,
        buf: &'a mut B,
        // `PhantomPinned` makes this future `!Unpin`.
        #[pin]
        _pin: PhantomPinned,
    }
}
/// Tries to write some bytes from the given `buf` to the writer in an
/// asynchronous manner, returning a future.
///
/// The future resolves to the number of bytes the writer accepted (zero
/// when `buf` has nothing remaining); `buf`'s cursor is advanced past
/// those bytes.
pub(crate) fn write_buf<'a, W, B>(writer: &'a mut W, buf: &'a mut B) -> WriteBuf<'a, W, B>
where
    W: AsyncWrite + Unpin,
    B: Buf,
{
    WriteBuf {
        writer,
        buf,
        _pin: PhantomPinned,
    }
}
impl<W, B> Future for WriteBuf<'_, W, B>
where
    W: AsyncWrite + Unpin,
    B: Buf,
{
    type Output = io::Result<usize>;

    fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<io::Result<usize>> {
        // Upper bound on the number of `IoSlice`s passed to a single
        // vectored write; bounds the size of the stack-allocated array below.
        const MAX_VECTOR_ELEMENTS: usize = 64;

        let me = self.project();
        // Nothing to write: complete immediately with zero bytes written.
        if !me.buf.has_remaining() {
            return Poll::Ready(Ok(0));
        }
        let n = if me.writer.is_write_vectored() {
            // Gather up to MAX_VECTOR_ELEMENTS chunks from the buffer and
            // submit them in one vectored write.
            let mut slices = [IoSlice::new(&[]); MAX_VECTOR_ELEMENTS];
            let cnt = me.buf.chunks_vectored(&mut slices);
            ready!(Pin::new(&mut *me.writer).poll_write_vectored(cx, &slices[..cnt]))?
        } else {
            // Non-vectored writer: write only the first contiguous chunk.
            ready!(Pin::new(&mut *me.writer).poll_write(cx, me.buf.chunk()))?
        };
        // Advance the cursor past the bytes the writer accepted.
        me.buf.advance(n);
        Poll::Ready(Ok(n))
    }
}
'''trigger detect, return indices of zero value to nonzero change in trigger channel'''
from numpy import array, nonzero
class get:
    """Locate zero -> nonzero transitions (rising pulse starts) in one trigger channel."""
    def __init__(self, unsorteddatachunk, channelinstance, trigtype):
        #trigtype=='TRIGGER' OR 'RESPONSE'
        #trig=trigdet.get(d.data_blockunsorted, ch, 'RESPONSE')
        '''channelstruct=channel.index(f, 'trig')'''
        # Column index of the requested channel type.
        # NOTE(review): assumes channelinstance.sortedch / sortedind are
        # parallel arrays mapping channel labels to column indices -- confirm.
        trigchpos=channelinstance.sortedind[channelinstance.sortedch==trigtype]
        #trigchpos=channelstruct.sortedindtype[channelstruct.sortedindtype[channelstruct.sortedlabeltype=='TRIGGER']]
        # Raw samples of the trigger channel.
        self.trigdata = unsorteddatachunk[:,trigchpos[0]]
        # nonzero() returns a tuple, so this is a 2-D (1, n) array of the
        # indices of all nonzero samples.
        trigpositive=array(nonzero(unsorteddatachunk[:,trigchpos[0]]))
        # True where the preceding sample is zero: data[i] + data[i-1] == data[i]
        # implies data[i-1] == 0, i.e. a rising edge from zero.
        # NOTE(review): at index 0 the "-1" wraps to the last sample; the
        # [1::] slice below drops the first detected edge -- confirm intent.
        nonzeropulsebool=unsorteddatachunk[trigpositive,trigchpos[0]]+unsorteddatachunk[trigpositive-1,trigchpos[0]]==unsorteddatachunk[trigpositive,trigchpos[0]]
        # Indices where each pulse starts (first edge discarded by [1::]).
        self.trigstartind=array(trigpositive[nonzeropulsebool][1::])
        #for debugging
        self.nz = nonzeropulsebool
        self.tp = trigpositive
        self.trigchpos = trigchpos
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import logging
from django.core.urlresolvers import reverse_lazy
from django.utils.translation import ugettext_lazy as _
from horizon import exceptions
from horizon import forms
from horizon import tables
from horizon.utils import memoized
from openstack_dashboard.api import sahara as saharaclient
from openstack_dashboard.dashboards.project. \
data_processing.data_image_registry.forms import EditTagsForm
from openstack_dashboard.dashboards.project. \
data_processing.data_image_registry.forms import RegisterImageForm
from openstack_dashboard.dashboards.project. \
data_processing.data_image_registry.tables import ImageRegistryTable
LOG = logging.getLogger(__name__)
class ImageRegistryView(tables.DataTableView):
    """Tabular listing of every image registered with Sahara."""
    table_class = ImageRegistryTable
    template_name = (
        'project/data_processing.data_image_registry/image_registry.html')
    page_title = _("Image Registry")

    def get_data(self):
        """Fetch registered images; fall back to an empty list on error."""
        try:
            return saharaclient.image_list(self.request)
        except Exception:
            exceptions.handle(self.request,
                              _('Unable to retrieve image list'))
            return []
def update_context_with_plugin_tags(request, context):
    """Populate context['plugins'] with the required image tags for every
    known plugin version, as {plugin_name: {version: tags}}.

    Errors talking to Sahara are reported through horizon's exception
    handler and leave the affected entries out of the mapping.
    """
    try:
        plugins = saharaclient.plugin_list(request)
    except Exception:
        plugins = []
        exceptions.handle(request, _("Unable to process plugin tags"))

    plugins_object = {}
    for plugin in plugins:
        version_tags = {}
        for version in plugin.versions:
            try:
                details = saharaclient.plugin_get_version_details(
                    request, plugin.name, version)
                version_tags[version] = details.required_image_tags
            except Exception:
                # Per-version failure: report and keep going with the rest.
                exceptions.handle(request, _("Unable to process plugin tags"))
        plugins_object[plugin.name] = version_tags
    context["plugins"] = plugins_object
class EditTagsView(forms.ModalFormView):
    """Modal form for editing the tags on a registered image."""
    form_class = EditTagsForm
    template_name = (
        'project/data_processing.data_image_registry/edit_tags.html')
    success_url = reverse_lazy(
        'horizon:project:data_processing.data_image_registry:index')
    page_title = _("Edit Image Tags")

    def get_context_data(self, **kwargs):
        context = super(EditTagsView, self).get_context_data(**kwargs)
        context['image'] = self.get_object()
        update_context_with_plugin_tags(self.request, context)
        return context

    @memoized.memoized_method
    def get_object(self):
        """Return the image being edited, or None if it cannot be fetched."""
        try:
            return saharaclient.image_get(self.request,
                                          self.kwargs["image_id"])
        except Exception:
            exceptions.handle(self.request,
                              _("Unable to fetch the image details"))
            return None

    def get_initial(self):
        image = self.get_object()
        return {
            "image_id": image.id,
            "tags_list": json.dumps(image.tags),
            "user_name": image.username,
            "description": image.description,
        }
class RegisterImageView(forms.ModalFormView):
    """Modal form for registering a new image with Sahara."""
    form_class = RegisterImageForm
    template_name = (
        'project/data_processing.data_image_registry/register_image.html')
    success_url = reverse_lazy(
        'horizon:project:data_processing.data_image_registry:index')
    page_title = _("Register Image")

    def get_context_data(self, **kwargs):
        context = super(RegisterImageView, self).get_context_data(**kwargs)
        update_context_with_plugin_tags(self.request, context)
        return context

    def get_initial(self):
        # Seed the form with an empty tag list so that images can be
        # registered without any tags at all.
        return {"tags_list": json.dumps([])}
""" Fantasm: A taskqueue-based Finite State Machine for App Engine Python
Docs and examples: http://code.google.com/p/fantasm/
Copyright 2010 VendAsta Technologies Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from fantasm import constants
# --- Runtime errors: raised while a state machine instance is executing. ---
class FSMRuntimeError(Exception):
    """ The parent class of all Fantasm runtime errors. """
    pass
class UnknownMachineError(FSMRuntimeError):
    """ A machine could not be found. """
    def __init__(self, machineName):
        """ Initialize exception """
        message = 'Cannot find machine "%s".' % machineName
        super(UnknownMachineError, self).__init__(message)
class UnknownStateError(FSMRuntimeError):
    """ A state could not be found. """
    def __init__(self, machineName, stateName):
        """ Initialize exception """
        message = 'State "%s" is unknown. (Machine %s)' % (stateName, machineName)
        super(UnknownStateError, self).__init__(message)
class UnknownEventError(FSMRuntimeError):
    """ An event and the transition bound to it could not be found. """
    def __init__(self, event, machineName, stateName):
        """ Initialize exception """
        message = 'Cannot find transition for event "%s". (Machine %s, State %s)' % (event, machineName, stateName)
        super(UnknownEventError, self).__init__(message)
class InvalidEventNameRuntimeError(FSMRuntimeError):
    """ Event returned from dispatch is invalid (and would cause problems with task name restrictions). """
    def __init__(self, event, machineName, stateName, instanceName):
        """ Initialize exception """
        message = 'Event "%r" returned by state is invalid. It must be a string and match pattern "%s". ' \
                  '(Machine %s, State %s, Instance %s)' % \
                  (event, constants.NAME_PATTERN, machineName, stateName, instanceName)
        super(InvalidEventNameRuntimeError, self).__init__(message)
class InvalidFinalEventRuntimeError(FSMRuntimeError):
    """ Event returned when a final state action returns an event. """
    def __init__(self, event, machineName, stateName, instanceName):
        """ Initialize exception """
        message = 'Event "%r" returned by final state is invalid. ' \
                  '(Machine %s, State %s, Instance %s)' % \
                  (event, machineName, stateName, instanceName)
        super(InvalidFinalEventRuntimeError, self).__init__(message)
class FanInWriteLockFailureRuntimeError(FSMRuntimeError):
    """ Exception when fan-in writers are unable to acquire a lock. """
    def __init__(self, event, machineName, stateName, instanceName):
        """ Initialize exception """
        # Fixed duplicated "to to" in the original message text.
        message = 'Event "%r" unable to be fanned-in due to write lock failure. ' \
                  '(Machine %s, State %s, Instance %s)' % \
                  (event, machineName, stateName, instanceName)
        super(FanInWriteLockFailureRuntimeError, self).__init__(message)
class FanInReadLockFailureRuntimeError(FSMRuntimeError):
    """ Exception when fan-in readers are unable to acquire a lock. """
    def __init__(self, event, machineName, stateName, instanceName):
        """ Initialize exception """
        # Fixed duplicated "to to" in the original message text.
        message = 'Event "%r" unable to be fanned-in due to read lock failure. ' \
                  '(Machine %s, State %s, Instance %s)' % \
                  (event, machineName, stateName, instanceName)
        super(FanInReadLockFailureRuntimeError, self).__init__(message)
class RequiredServicesUnavailableRuntimeError(FSMRuntimeError):
    """ Some of the required API services are not available. """
    def __init__(self, unavailableServices):
        """ Initialize exception """
        # REQUEST_LENGTH is the request-deadline window (seconds) within which
        # the services are expected to become available.
        message = 'The following services will not be available in the %d seconds: %s. This task will be retried.' % \
                  (constants.REQUEST_LENGTH, unavailableServices)
        super(RequiredServicesUnavailableRuntimeError, self).__init__(message)
# --- Configuration errors: raised while parsing/validating the YAML config. ---
class ConfigurationError(Exception):
    """ Parent class for all Fantasm configuration errors. """
    pass
class YamlFileNotFoundError(ConfigurationError):
    """ The Yaml file could not be found. """
    def __init__(self, filename):
        """ Initialize exception """
        message = 'Yaml configuration file "%s" not found.' % filename
        super(YamlFileNotFoundError, self).__init__(message)
class YamlFileCircularImportError(ConfigurationError):
    """ The Yaml is involved in a circular import. """
    def __init__(self, filename):
        """ Initialize exception """
        message = 'Yaml configuration file "%s" involved in a circular import.' % filename
        super(YamlFileCircularImportError, self).__init__(message)
class StateMachinesAttributeRequiredError(ConfigurationError):
    """ The YAML file requires a 'state_machines' attribute. """
    def __init__(self):
        """ Initialize exception """
        message = '"%s" is required attribute of yaml file.' % constants.STATE_MACHINES_ATTRIBUTE
        super(StateMachinesAttributeRequiredError, self).__init__(message)
class MachineNameRequiredError(ConfigurationError):
    """ Each machine requires a name. """
    def __init__(self):
        """ Initialize exception """
        message = '"%s" is required attribute of machine.' % constants.MACHINE_NAME_ATTRIBUTE
        super(MachineNameRequiredError, self).__init__(message)
class InvalidQueueNameError(ConfigurationError):
    """ The queue name was not valid. """
    def __init__(self, queueName, machineName):
        """ Initialize exception """
        message = 'Queue name "%s" must exist in queue.yaml. (Machine %s)' % (queueName, machineName)
        super(InvalidQueueNameError, self).__init__(message)
class InvalidMachineNameError(ConfigurationError):
    """ The machine name was not valid. """
    def __init__(self, machineName):
        """ Initialize exception """
        message = 'Machine name must match pattern "%s". (Machine %s)' % (constants.NAME_PATTERN, machineName)
        super(InvalidMachineNameError, self).__init__(message)
class MachineNameNotUniqueError(ConfigurationError):
    """ Each machine in a YAML file must have a unique name. """
    def __init__(self, machineName):
        """ Initialize exception """
        message = 'Machine names must be unique. (Machine %s)' % machineName
        super(MachineNameNotUniqueError, self).__init__(message)
class MachineHasMultipleInitialStatesError(ConfigurationError):
    """ Each machine must have exactly one initial state. """
    def __init__(self, machineName):
        """ Initialize exception """
        message = 'Machine has multiple initial states, but only one is allowed. (Machine %s)' % machineName
        super(MachineHasMultipleInitialStatesError, self).__init__(message)
class MachineHasNoInitialStateError(ConfigurationError):
    """ Each machine must have exactly one initial state. """
    def __init__(self, machineName):
        """ Initialize exception """
        message = 'Machine has no initial state, exactly one is required. (Machine %s)' % machineName
        super(MachineHasNoInitialStateError, self).__init__(message)
class MachineHasNoFinalStateError(ConfigurationError):
    """ Each machine must have at least one final state. """
    def __init__(self, machineName):
        """ Initialize exception """
        message = 'Machine has no final states, but at least one is required. (Machine %s)' % machineName
        super(MachineHasNoFinalStateError, self).__init__(message)
class StateNameRequiredError(ConfigurationError):
    """ Each state requires a name. """
    def __init__(self, machineName):
        """ Initialize exception """
        message = '"%s" is required attribute of state. (Machine %s)' % (constants.STATE_NAME_ATTRIBUTE, machineName)
        super(StateNameRequiredError, self).__init__(message)
class InvalidStateNameError(ConfigurationError):
    """ The state name was not valid. """
    def __init__(self, machineName, stateName):
        """ Initialize exception """
        message = 'State name must match pattern "%s". (Machine %s, State %s)' % \
                  (constants.NAME_PATTERN, machineName, stateName)
        super(InvalidStateNameError, self).__init__(message)
class StateNameNotUniqueError(ConfigurationError):
    """ Each state within a machine must have a unique name. """
    def __init__(self, machineName, stateName):
        """ Initialize exception """
        message = 'State names within a machine must be unique. (Machine %s, State %s)' % \
                  (machineName, stateName)
        super(StateNameNotUniqueError, self).__init__(message)
class StateActionRequired(ConfigurationError):
    """ Each state requires an action. """
    # NOTE(review): class name lacks the "Error" suffix used by every sibling;
    # renaming would break existing callers, so it is left as-is.
    def __init__(self, machineName, stateName):
        """ Initialize exception """
        message = '"%s" is required attribute of state. (Machine %s, State %s)' % \
                  (constants.STATE_ACTION_ATTRIBUTE, machineName, stateName)
        super(StateActionRequired, self).__init__(message)
class UnknownModuleError(ConfigurationError):
    """ When resolving actions, the module was not found. """
    def __init__(self, moduleName, importError):
        """ Initialize exception """
        message = 'Module "%s" cannot be imported due to "%s".' % (moduleName, importError)
        super(UnknownModuleError, self).__init__(message)
class UnknownClassError(ConfigurationError):
    """ When resolving actions, the class was not found. """
    def __init__(self, moduleName, className):
        """ Initialize exception """
        message = 'Class "%s" was not found in module "%s".' % (className, moduleName)
        super(UnknownClassError, self).__init__(message)
class UnknownObjectError(ConfigurationError):
    """ When resolving actions, the object was not found. """
    def __init__(self, objectName):
        """ Initialize exception """
        message = 'Object "%s" was not found.' % (objectName)
        super(UnknownObjectError, self).__init__(message)
class UnexpectedObjectTypeError(ConfigurationError):
    """ When resolving actions, the object was not of the expected type. """
    def __init__(self, objectName, expectedType):
        """ Initialize exception """
        message = 'Object "%s" is not of type "%s".' % (objectName, expectedType)
        super(UnexpectedObjectTypeError, self).__init__(message)
class InvalidMaxRetriesError(ConfigurationError):
    """ max_retries must be a positive integer. """
    def __init__(self, machineName, maxRetries):
        """ Initialize exception """
        message = '%s "%s" is invalid. Must be an integer. (Machine %s)' % \
                  (constants.MAX_RETRIES_ATTRIBUTE, maxRetries, machineName)
        super(InvalidMaxRetriesError, self).__init__(message)
# Validation errors for the taskqueue retry-policy attributes of a machine.
class InvalidTaskRetryLimitError(ConfigurationError):
    """ task_retry_limit must be a positive integer. """
    def __init__(self, machineName, taskRetryLimit):
        """ Initialize exception """
        message = '%s "%s" is invalid. Must be an integer. (Machine %s)' % \
                  (constants.TASK_RETRY_LIMIT_ATTRIBUTE, taskRetryLimit, machineName)
        super(InvalidTaskRetryLimitError, self).__init__(message)
class InvalidMinBackoffSecondsError(ConfigurationError):
    """ min_backoff_seconds must be a positive integer. """
    def __init__(self, machineName, minBackoffSeconds):
        """ Initialize exception """
        message = '%s "%s" is invalid. Must be an integer. (Machine %s)' % \
                  (constants.MIN_BACKOFF_SECONDS_ATTRIBUTE, minBackoffSeconds, machineName)
        super(InvalidMinBackoffSecondsError, self).__init__(message)
class InvalidMaxBackoffSecondsError(ConfigurationError):
    """ max_backoff_seconds must be a positive integer. """
    def __init__(self, machineName, maxBackoffSeconds):
        """ Initialize exception """
        message = '%s "%s" is invalid. Must be an integer. (Machine %s)' % \
                  (constants.MAX_BACKOFF_SECONDS_ATTRIBUTE, maxBackoffSeconds, machineName)
        super(InvalidMaxBackoffSecondsError, self).__init__(message)
class InvalidTaskAgeLimitError(ConfigurationError):
    """ task_age_limit must be a positive integer. """
    def __init__(self, machineName, taskAgeLimit):
        """ Initialize exception """
        message = '%s "%s" is invalid. Must be an integer. (Machine %s)' % \
                  (constants.TASK_AGE_LIMIT_ATTRIBUTE, taskAgeLimit, machineName)
        super(InvalidTaskAgeLimitError, self).__init__(message)
class InvalidMaxDoublingsError(ConfigurationError):
    """ max_doublings must be a positive integer. """
    def __init__(self, machineName, maxDoublings):
        """ Initialize exception """
        message = '%s "%s" is invalid. Must be an integer. (Machine %s)' % \
                  (constants.MAX_DOUBLINGS_ATTRIBUTE, maxDoublings, machineName)
        super(InvalidMaxDoublingsError, self).__init__(message)
class MaxRetriesAndTaskRetryLimitMutuallyExclusiveError(ConfigurationError):
    """ max_retries and task_retry_limit cannot both be specified on a machine. """
    def __init__(self, machineName):
        """ Initialize exception """
        message = 'max_retries and task_retry_limit cannot both be specified on a machine. (Machine %s)' % \
                  machineName
        super(MaxRetriesAndTaskRetryLimitMutuallyExclusiveError, self).__init__(message)
class InvalidLoggingError(ConfigurationError):
    """ The logging value was not valid. """
    def __init__(self, machineName, loggingValue):
        """ Initialize exception """
        message = 'logging attribute "%s" is invalid (must be one of "%s"). (Machine %s)' % \
                  (loggingValue, constants.VALID_LOGGING_VALUES, machineName)
        super(InvalidLoggingError, self).__init__(message)
# Validation errors for transition definitions.
class TransitionNameRequiredError(ConfigurationError):
    """ Each transition requires a name. """
    def __init__(self, machineName):
        """ Initialize exception """
        message = '"%s" is required attribute of transition. (Machine %s)' % \
                  (constants.TRANS_NAME_ATTRIBUTE, machineName)
        super(TransitionNameRequiredError, self).__init__(message)
class InvalidTransitionNameError(ConfigurationError):
    """ The transition name was invalid. """
    def __init__(self, machineName, transitionName):
        """ Initialize exception """
        message = 'Transition name must match pattern "%s". (Machine %s, Transition %s)' % \
                  (constants.NAME_PATTERN, machineName, transitionName)
        super(InvalidTransitionNameError, self).__init__(message)
class TransitionNameNotUniqueError(ConfigurationError):
    """ Each transition within a machine must have a unique name. """
    def __init__(self, machineName, transitionName):
        """ Initialize exception """
        message = 'Transition names within a machine must be unique. (Machine %s, Transition %s)' % \
                  (machineName, transitionName)
        super(TransitionNameNotUniqueError, self).__init__(message)
class InvalidTransitionEventNameError(ConfigurationError):
    """ The transition's event name was invalid. """
    def __init__(self, machineName, fromStateName, eventName):
        """ Initialize exception """
        message = 'Transition event name must match pattern "%s". (Machine %s, State %s, Event %s)' % \
                  (constants.NAME_PATTERN, machineName, fromStateName, eventName)
        super(InvalidTransitionEventNameError, self).__init__(message)
class TransitionUnknownToStateError(ConfigurationError):
    """ The specified to state is unknown. """
    # NOTE(review): docstring was swapped with TransitionToRequiredError's;
    # the messages below show the intended meaning of each class.
    def __init__(self, machineName, transitionName, toState):
        """ Initialize exception """
        message = 'Transition to state is undefined. (Machine %s, Transition %s, To %s)' % \
                  (machineName, transitionName, toState)
        super(TransitionUnknownToStateError, self).__init__(message)
class TransitionToRequiredError(ConfigurationError):
    """ Each transition must specify a to state. """
    def __init__(self, machineName, transitionName):
        """ Initialize exception """
        message = '"%s" is required attribute of transition. (Machine %s, Transition %s)' % \
                  (constants.TRANS_TO_ATTRIBUTE, machineName, transitionName)
        super(TransitionToRequiredError, self).__init__(message)
class TransitionEventRequiredError(ConfigurationError):
    """ Each transition requires an event to be bound to. """
    def __init__(self, machineName, fromStateName):
        """ Initialize exception """
        message = '"%s" is required attribute of transition. (Machine %s, State %s)' % \
                  (constants.TRANS_EVENT_ATTRIBUTE, machineName, fromStateName)
        super(TransitionEventRequiredError, self).__init__(message)
class InvalidCountdownError(ConfigurationError):
    """ Countdown must be a positive integer. """
    def __init__(self, countdown, machineName, fromStateName):
        """ Initialize exception """
        message = 'Countdown "%s" must be a positive integer. (Machine %s, State %s)' % \
                  (countdown, machineName, fromStateName)
        super(InvalidCountdownError, self).__init__(message)
# Raised when unrecognized attributes appear in the YAML configuration.
class InvalidMachineAttributeError(ConfigurationError):
    """ Unknown machine attributes were found. """
    def __init__(self, machineName, badAttributes):
        """ Initialize exception """
        # Fixed grammar in the message: "attributes a machine" -> "attributes of a machine".
        message = 'The following are invalid attributes of a machine: %s. (Machine %s)' % \
                  (badAttributes, machineName)
        super(InvalidMachineAttributeError, self).__init__(message)
class InvalidStateAttributeError(ConfigurationError):
    """ Unknown state attributes were found. """
    def __init__(self, machineName, stateName, badAttributes):
        """ Initialize exception """
        message = 'The following are invalid attributes of a state: %s. (Machine %s, State %s)' % \
                  (badAttributes, machineName, stateName)
        super(InvalidStateAttributeError, self).__init__(message)
class InvalidTransitionAttributeError(ConfigurationError):
    """ Unknown transition attributes were found. """
    def __init__(self, machineName, fromStateName, badAttributes):
        """ Initialize exception """
        message = 'The following are invalid attributes of a transition: %s. (Machine %s, State %s)' % \
                  (badAttributes, machineName, fromStateName)
        super(InvalidTransitionAttributeError, self).__init__(message)
class InvalidInterfaceError(ConfigurationError):
    """ Interface errors. """
    pass
class InvalidContinuationInterfaceError(InvalidInterfaceError):
    """ The specified state was denoted as a continuation, but it does not have a continuation method. """
    def __init__(self, machineName, stateName):
        """ Initialize exception """
        message = 'The state was specified as continuation=True, but the action class does not have a ' + \
                  'continuation() method. (Machine %s, State %s)' % (machineName, stateName)
        super(InvalidContinuationInterfaceError, self).__init__(message)
class InvalidActionInterfaceError(InvalidInterfaceError):
    """ The specified state's action class does not have an execute() method. """
    def __init__(self, machineName, stateName):
        """ Initialize exception """
        message = 'The state\'s action class does not have an execute() method. (Machine %s, State %s)' % \
                  (machineName, stateName)
        super(InvalidActionInterfaceError, self).__init__(message)
class InvalidEntryInterfaceError(InvalidInterfaceError):
    """ The specified state's entry class does not have an execute() method. """
    def __init__(self, machineName, stateName):
        """ Initialize exception """
        message = 'The state\'s entry class does not have an execute() method. (Machine %s, State %s)' % \
                  (machineName, stateName)
        super(InvalidEntryInterfaceError, self).__init__(message)
class InvalidExitInterfaceError(InvalidInterfaceError):
    """ The specified state's exit class does not have an execute() method. """
    def __init__(self, machineName, stateName):
        """ Initialize exception """
        message = 'The state\'s exit class does not have an execute() method. (Machine %s, State %s)' % \
                  (machineName, stateName)
        super(InvalidExitInterfaceError, self).__init__(message)
class InvalidFanInError(ConfigurationError):
    """ fan_in must be a positive integer. """
    def __init__(self, machineName, stateName, fanInPeriod):
        """ Initialize exception """
        message = '%s "%s" is invalid. Must be an integer. (Machine %s, State %s)' % \
                  (constants.STATE_FAN_IN_ATTRIBUTE, fanInPeriod, machineName, stateName)
        super(InvalidFanInError, self).__init__(message)
class FanInContinuationNotSupportedError(ConfigurationError):
    """ Cannot have fan_in and continuation on the same state, because it hurts our head at the moment. """
    def __init__(self, machineName, stateName):
        """ Initialize exception """
        message = '%s and %s are not supported on the same state. Maybe some day... (Machine %s, State %s)' % \
                  (constants.STATE_CONTINUATION_ATTRIBUTE, constants.STATE_FAN_IN_ATTRIBUTE,
                   machineName, stateName)
        super(FanInContinuationNotSupportedError, self).__init__(message)
class UnsupportedConfigurationError(ConfigurationError):
    """ Some exit and transition actions are not allowed near fan_in and continuation. At least not at the moment. """
    def __init__(self, machineName, stateName, message):
        """ Initialize exception """
        message = '%s (Machine %s, State %s)' % (message, machineName, stateName)
        super(UnsupportedConfigurationError, self).__init__(message)
# For this example, we're going to write a simple momentum script. When the
# stock goes up quickly, we're going to buy; when it goes down quickly, we're
# going to sell. Hopefully we'll ride the waves.
import os
import tempfile
import time
import pandas as pd
import talib
from logbook import Logger
from catalyst import run_algorithm
from catalyst.api import symbol, record, order_target_percent, get_dataset
from catalyst.exchange.utils.stats_utils import set_print_settings, \
get_pretty_stats
# We give a name to the algorithm which Catalyst will use to persist its state.
# In this example, Catalyst will create the `.catalyst/data/live_algos`
# directory. If we stop and start the algorithm, Catalyst will resume its
# state using the files included in the folder.
from catalyst.utils.paths import ensure_directory
# Namespace under which Catalyst persists this algorithm's live state.
NAMESPACE = 'mean_reversion_simple'
log = Logger(NAMESPACE)
# To run an algorithm in Catalyst, you need two functions: initialize and
# handle_data.
def initialize(context):
    # This initialize function sets any data or variables that you'll use in
    # your algorithm. For instance, you'll want to define the trading pair (or
    # trading pairs) you want to backtest. You'll also want to define any
    # parameters or values you're going to use.
    # In this example we select, from the 100 largest coins by market cap,
    # the 5 with the lowest market cap that trade against ETH.
    df = get_dataset('testmarketcap2') # type: pd.DataFrame
    # Picking a specific date in our DataFrame
    first_dt = df.index.get_level_values(0)[0]
    # Since we use a MultiIndex with date / symbol, picking a date will
    # result in a new DataFrame for the selected date with a single
    # symbol index
    df = df.xs(first_dt, level=0)
    # Keep only the top coins by market cap
    df = df.loc[df['market_cap_usd'].isin(df['market_cap_usd'].nlargest(100))]
    set_print_settings()
    df.sort_values(by=['market_cap_usd'], ascending=True, inplace=True)
    print('the marketplace data:\n{}'.format(df))
    # Pick the 5 assets with the lowest market cap for trading
    quote_currency = 'eth'
    exchange = context.exchanges[next(iter(context.exchanges))]
    # Symbols listed on the exchange before the current simulation time.
    symbols = [a.symbol for a in exchange.assets
               if a.start_date < context.datetime]
    context.assets = []
    # NOTE(review): `price` here is actually the market_cap_usd value, not a
    # price; the series is sorted ascending so we take the smallest caps first.
    # `currency.decode('utf-8')` assumes the index labels are bytes -- confirm.
    for currency, price in df['market_cap_usd'].iteritems():
        if len(context.assets) >= 5:
            break
        s = '{}_{}'.format(currency.decode('utf-8'), quote_currency)
        if s in symbols:
            context.assets.append(symbol(s))
    context.base_price = None
    context.current_day = None
    # RSI thresholds: buy at/below 55, sell at/above 60.
    context.RSI_OVERSOLD = 55
    context.RSI_OVERBOUGHT = 60
    # Candle size used when requesting price history (pandas offset alias).
    context.CANDLE_SIZE = '5T'
    context.start_time = time.time()
def handle_data(context, data):
    # This handle_data function is where the real work is done. Our data is
    # minute-level tick data, and each minute is called a frame. This function
    # runs on each frame of the data.
    # We flag the first period of each day.
    # Since cryptocurrencies trade 24/7 the `before_trading_starts` handle
    # would only execute once. This method works with minute and daily
    # frequencies.
    today = data.current_dt.floor('1D')
    if today != context.current_day:
        # New day: clear the per-day "already traded" flags.
        context.traded_today = dict()
        context.current_day = today
    # Preparing dictionaries for asset-level data points
    volumes = dict()
    rsis = dict()
    price_values = dict()
    # Snapshot of available cash, taken before any orders this frame.
    cash = context.portfolio.cash
    for asset in context.assets:
        # We're computing the volume-weighted-average-price of the security
        # defined above, in the context.assets variable. For this example,
        # we're using three bars on the 15 min bars.
        # The frequency attribute determine the bar size. We use this
        # convention for the frequency alias:
        # http://pandas.pydata.org/pandas-docs/stable/timeseries.html#offset-aliases
        prices = data.history(
            asset,
            fields='close',
            bar_count=50,
            frequency=context.CANDLE_SIZE
        )
        # Ta-lib calculates various technical indicator based on price and
        # volume arrays.
        # In this example, we are computing the 14-period RSI.
        # NOTE(review): talib is expected to emit NaN for the warm-up bars;
        # NaN compares False against both thresholds, so no trade fires then.
        rsi = talib.RSI(prices.values, timeperiod=14)
        # We need a variable for the current price of the security to compare
        # to the average. Since we are requesting two fields, data.current()
        # returns a DataFrame with
        current = data.current(asset, fields=['close', 'volume'])
        price = current['close']
        # If base_price is not set, we use the current value. This is the
        # price at the first bar which we reference to calculate price_change.
        # if asset not in context.base_price:
        #     context.base_price[asset] = price
        #
        # base_price = context.base_price[asset]
        # price_change = (price - base_price) / base_price
        # Tracking the relevant data
        volumes[asset] = current['volume']
        rsis[asset] = rsi[-1]
        price_values[asset] = price
        # price_changes[asset] = price_change
        # We are trying to avoid over-trading by limiting our trades to
        # one per day.
        if asset in context.traded_today:
            continue
        # Exit if we cannot trade
        if not data.can_trade(asset):
            continue
        # Another powerful built-in feature of the Catalyst backtester is the
        # portfolio object. The portfolio object tracks your positions, cash,
        # cost basis of specific holdings, and more. In this line, we
        # calculate how long or short our position is at this minute.
        pos_amount = context.portfolio.positions[asset].amount
        if rsi[-1] <= context.RSI_OVERSOLD and pos_amount == 0:
            log.info(
                '{}: buying - price: {}, rsi: {}'.format(
                    data.current_dt, price, rsi[-1]
                )
            )
            # Set a style for limit orders: buy up to 0.5% above last price.
            limit_price = price * 1.005
            # Equal-weight target across all tracked assets.
            target = 1.0 / len(context.assets)
            order_target_percent(
                asset, target, limit_price=limit_price
            )
            context.traded_today[asset] = True
        elif rsi[-1] >= context.RSI_OVERBOUGHT and pos_amount > 0:
            log.info(
                '{}: selling - price: {}, rsi: {}'.format(
                    data.current_dt, price, rsi[-1]
                )
            )
            # Sell with a limit 0.5% below last price.
            limit_price = price * 0.995
            order_target_percent(
                asset, 0, limit_price=limit_price
            )
            context.traded_today[asset] = True
    # Now that we've collected all current data for this frame, we use
    # the record() method to save it. This data will be available as
    # a parameter of the analyze() function for further analysis.
    record(
        current_price=price_values,
        volume=volumes,
        rsi=rsis,
        cash=cash,
    )
def analyze(context=None, perf=None):
    """Print the formatted performance statistics after the run completes."""
    print('the algo stats:\n{}'.format(get_pretty_stats(perf)))
if __name__ == '__main__':
    # The execution mode: backtest or live
    live = False
    if live:
        # Live trading against poloniex, resuming state under NAMESPACE.
        run_algorithm(
            capital_base=0.1,
            initialize=initialize,
            handle_data=handle_data,
            analyze=analyze,
            exchange_name='poloniex',
            live=True,
            algo_namespace=NAMESPACE,
            quote_currency='btc',
            live_graph=False,
            simulate_orders=False,
            stats_output=None,
        )
    else:
        folder = os.path.join(
            tempfile.gettempdir(), 'catalyst', NAMESPACE
        )
        ensure_directory(folder)
        timestr = time.strftime('%Y%m%d-%H%M%S')
        out = os.path.join(folder, '{}.p'.format(timestr))
        # catalyst run -f catalyst/examples/mean_reversion_simple.py \
        #    -x bitfinex -s 2017-10-1 -e 2017-11-10 -c usdt -n mean-reversion \
        #    --data-frequency minute --capital-base 10000
        run_algorithm(
            capital_base=100,
            data_frequency='minute',
            initialize=initialize,
            handle_data=handle_data,
            analyze=analyze,
            exchange_name='poloniex',
            algo_namespace=NAMESPACE,
            quote_currency='eth',
            start=pd.to_datetime('2017-10-01', utc=True),
            end=pd.to_datetime('2017-10-15', utc=True),
        )
        # NOTE(review): `out` is never passed to run_algorithm (no
        # output/stats_output argument), so nothing is actually written
        # to this path despite the log message below -- confirm intent.
        log.info('saved perf stats: {}'.format(out))
# Hosts experimental/flaky jobs on a schedule (or via the ciflow/unstable
# tag) so their red signal stays out of the stable periodic workflows.
name: unstable-periodic
on:
  schedule:
    - cron: 45 0,4,8,12,16,20 * * *
    - cron: 29 8 * * *  # about 1:29am PDT, for mem leak check and rerun disabled tests
  push:
    tags:
      - ciflow/unstable/*
  workflow_dispatch:
concurrency:
  # One run per workflow/ref/SHA/trigger combination; a newer run cancels an older one.
  group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref_name }}-${{ github.ref_type == 'branch' && github.sha }}-${{ github.event_name == 'workflow_dispatch' }}-${{ github.event_name == 'schedule' }}-${{ github.event.schedule }}
  cancel-in-progress: true
permissions: read-all
jobs:
  # There must be at least one job here to satisfy GitHub action workflow syntax
  introduction:
    if: github.repository_owner == 'pytorch'
    runs-on: ubuntu-latest
    continue-on-error: true
    steps:
      - name: Introduce PyTorch unstable (periodic) workflow
        run: |
          echo "PyTorch unstable workflow is used to host experimental or flaky jobs"
          echo " that needs to be run periodically, but doesn't impact trunk as part"
          echo " of the stable periodic workflows."
          echo
          echo "In addition, a new label called ciflow/unstable can be attached to the"
          echo " PR to trigger this workflow. That can be done either manually or"
          echo " automatically using PyTorch auto-label bot."
          echo
          echo "Once the jobs are deemed stable enough (% red signal < 5% and TTS < 3h),"
          echo " they can graduate and move back to periodic."
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
# vim: set et sw=4 ts=4 sts=4 ff=unix fenc=utf8:
# Author: Binux<roy@binux.me>
# http://binux.me
# Created on 2014-12-10 01:34:09
import os
import sys
import time
import click
import shutil
import inspect
import unittest2 as unittest
from pyspider import run
from pyspider.libs import utils
class TestBench(unittest.TestCase):
    """Smoke-test the pyspider ``bench`` subcommand by running it as a
    subprocess and asserting on its stderr progress log."""

    # NOTE(review): classmethods conventionally name their first parameter
    # `cls`, not `self`; behavior is unaffected.
    @classmethod
    def setUpClass(self):
        # Start from an empty bench data directory so runs are reproducible.
        shutil.rmtree('./data/bench', ignore_errors=True)
        os.makedirs('./data/bench')

    @classmethod
    def tearDownClass(self):
        shutil.rmtree('./data/bench', ignore_errors=True)

    def test_10_bench(self):
        import subprocess
        #cmd = [sys.executable]
        # Run the child under `coverage` so it contributes to coverage data.
        cmd = ['coverage', 'run']
        p = subprocess.Popen(cmd+[
            inspect.getsourcefile(run),
            '--queue-maxsize=0',
            'bench',
            '--total=500'
        ], close_fds=True, stderr=subprocess.PIPE)
        stdout, stderr = p.communicate()
        stderr = utils.text(stderr)
        print(stderr)
        # The bench run logs its phases to stderr; all four must appear and
        # the process must exit cleanly.
        self.assertEqual(p.returncode, 0, stderr)
        self.assertIn('Crawled', stderr)
        self.assertIn('Fetched', stderr)
        self.assertIn('Processed', stderr)
        self.assertIn('Saved', stderr)
# Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from nova import db
from nova import exception
from nova.objects import instance_fault
from nova.tests.objects import test_objects
# Canned DB rows keyed by instance UUID, shaped like the return value of
# db.instance_fault_get_by_instance_uuids().  The tests below treat the first
# entry of each list as the "latest" fault.
fake_faults = {
    'fake-uuid': [
        {'id': 1, 'instance_uuid': 'fake-uuid', 'code': 123, 'message': 'msg1',
         'details': 'details', 'host': 'host', 'deleted': False,
         'created_at': None, 'updated_at': None, 'deleted_at': None},
        {'id': 2, 'instance_uuid': 'fake-uuid', 'code': 456, 'message': 'msg2',
         'details': 'details', 'host': 'host', 'deleted': False,
         'created_at': None, 'updated_at': None, 'deleted_at': None},
    ]
}
class _TestInstanceFault(object):
    """Shared InstanceFault test cases, mixed into both the local and the
    remote (RPC) test runners below."""

    def test_get_latest_for_instance(self):
        self.mox.StubOutWithMock(db, 'instance_fault_get_by_instance_uuids')
        db.instance_fault_get_by_instance_uuids(self.context, ['fake-uuid']
                                                ).AndReturn(fake_faults)
        self.mox.ReplayAll()
        fault = instance_fault.InstanceFault.get_latest_for_instance(
            self.context, 'fake-uuid')
        # The returned fault must mirror the first canned row field-for-field.
        for key in fake_faults['fake-uuid'][0]:
            self.assertEqual(fake_faults['fake-uuid'][0][key], fault[key])

    def test_get_latest_for_instance_with_none(self):
        self.mox.StubOutWithMock(db, 'instance_fault_get_by_instance_uuids')
        db.instance_fault_get_by_instance_uuids(self.context, ['fake-uuid']
                                                ).AndReturn({})
        self.mox.ReplayAll()
        fault = instance_fault.InstanceFault.get_latest_for_instance(
            self.context, 'fake-uuid')
        # No DB rows for the instance -> no fault object.
        self.assertIsNone(fault)

    def test_get_by_instance(self):
        self.mox.StubOutWithMock(db, 'instance_fault_get_by_instance_uuids')
        db.instance_fault_get_by_instance_uuids(self.context, ['fake-uuid']
                                                ).AndReturn(fake_faults)
        self.mox.ReplayAll()
        faults = instance_fault.InstanceFaultList.get_by_instance_uuids(
            self.context, ['fake-uuid'])
        # Every canned row must appear, in order, in the returned list.
        for index, db_fault in enumerate(fake_faults['fake-uuid']):
            for key in db_fault:
                self.assertEqual(fake_faults['fake-uuid'][index][key],
                                 faults[index][key])

    def test_get_by_instance_with_none(self):
        self.mox.StubOutWithMock(db, 'instance_fault_get_by_instance_uuids')
        db.instance_fault_get_by_instance_uuids(self.context, ['fake-uuid']
                                                ).AndReturn({})
        self.mox.ReplayAll()
        faults = instance_fault.InstanceFaultList.get_by_instance_uuids(
            self.context, ['fake-uuid'])
        self.assertEqual(0, len(faults))

    # mock.patch decorators apply bottom-up: mock_create is the
    # db.instance_fault_create mock and cells_fault_create is the cells RPC
    # mock.  update_cells is supplied by the calling test.
    @mock.patch('nova.cells.rpcapi.CellsAPI.instance_fault_create_at_top')
    @mock.patch('nova.db.instance_fault_create')
    def _test_create(self, update_cells, mock_create, cells_fault_create):
        mock_create.return_value = fake_faults['fake-uuid'][1]
        fault = instance_fault.InstanceFault()
        fault.instance_uuid = 'fake-uuid'
        fault.code = 456
        fault.message = 'foo'
        fault.details = 'you screwed up'
        fault.host = 'myhost'
        fault.create(self.context)
        # The object picks up the id assigned by the (mocked) DB layer.
        self.assertEqual(2, fault.id)
        mock_create.assert_called_once_with(self.context,
                                            {'instance_uuid': 'fake-uuid',
                                             'code': 456,
                                             'message': 'foo',
                                             'details': 'you screwed up',
                                             'host': 'myhost'})
        if update_cells:
            # In this configuration the new fault is propagated to the
            # top-level cell via the cells RPC API.
            cells_fault_create.assert_called_once_with(
                self.context, fake_faults['fake-uuid'][1])
        else:
            self.assertFalse(cells_fault_create.called)

    def test_create_no_cells(self):
        self.flags(enable=False, group='cells')
        self._test_create(False)

    def test_create_api_cell(self):
        self.flags(cell_type='api', enable=True, group='cells')
        self._test_create(False)

    def test_create_compute_cell(self):
        self.flags(cell_type='compute', enable=True, group='cells')
        self._test_create(True)

    def test_create_already_created(self):
        # create() must refuse to run on an object that already has an id.
        fault = instance_fault.InstanceFault()
        fault.id = 1
        self.assertRaises(exception.ObjectActionError,
                          fault.create, self.context)
class TestInstanceFault(test_objects._LocalTest,
                        _TestInstanceFault):
    """Run the shared fault tests against local (in-process) objects."""
    pass
class TestInstanceFaultRemote(test_objects._RemoteTest,
                              _TestInstanceFault):
    """Run the shared fault tests through the remote (RPC) object path."""
    pass
from test_base import MainTestCase
import os
from odk_logger.models import XForm
from unittest.case import skip
class TestUnique(MainTestCase):
    """Checks uniqueness rules for published survey forms."""

    # NOTE(review): skipped because the schema's unique constraint does not
    # match what this test expects ("DB Fields are wrong").
    @skip("DB Fields are wrong")
    def test_unique_together(self):
        """
        Multiple users can have the same survey, but id_strings of
        surveys must be unique for a single user.
        """
        self._create_user_and_login()
        self.this_directory = os.path.dirname(__file__)
        xls_path = os.path.join(self.this_directory, "fixtures", "gps", "gps.xls")
        # first time
        response = self._publish_xls_file(xls_path)
        self.assertEqual(response.status_code, 200)
        self.assertEquals(XForm.objects.count(), 1)
        # second time
        response = self._publish_xls_file(xls_path)
        self.assertEqual(response.status_code, 200)
        self.assertEquals(XForm.objects.count(), 2)
        self.client.logout()
        # first time
        self._create_user_and_login(username="carl", password="carl")
        response = self._publish_xls_file(xls_path)
        self.assertEquals(XForm.objects.count(), 3)
#!/usr/bin/env python
#
# Copyright 2006, 2007 Google Inc. All Rights Reserved.
# Author: danderson@google.com (David Anderson)
#
# Script for uploading files to a Google Code project.
#
# This is intended to be both a useful script for people who want to
# streamline project uploads and a reference implementation for
# uploading files to Google Code projects.
#
# To upload a file to Google Code, you need to provide a path to the
# file on your local machine, a small summary of what the file is, a
# project name, and a valid account that is a member or owner of that
# project. You can optionally provide a list of labels that apply to
# the file. The file will be uploaded under the same name that it has
# in your local filesystem (that is, the "basename" or last path
# component). Run the script with '--help' to get the exact syntax
# and available options.
#
# Note that the upload script requests that you enter your
# googlecode.com password. This is NOT your Gmail account password!
# This is the password you use on googlecode.com for committing to
# Subversion and uploading files. You can find your password by going
# to http://code.google.com/hosting/settings when logged in with your
# Gmail account. If you have already committed to your project's
# Subversion repository, the script will automatically retrieve your
# credentials from there (unless disabled, see the output of '--help'
# for details).
#
# If you are looking at this script as a reference for implementing
# your own Google Code file uploader, then you should take a look at
# the upload() function, which is the meat of the uploader. You
# basically need to build a multipart/form-data POST request with the
# right fields and send it to https://PROJECT.googlecode.com/files .
# Authenticate the request using HTTP Basic authentication, as is
# shown below.
#
# Licensed under the terms of the Apache Software License 2.0:
# http://www.apache.org/licenses/LICENSE-2.0
#
# Questions, comments, feature requests and patches are most welcome.
# Please direct all of these to the Google Code users group:
# http://groups.google.com/group/google-code-hosting
"""Google Code file uploader script.
"""
__author__ = 'danderson@google.com (David Anderson)'
import httplib
import os.path
import optparse
import getpass
import base64
import sys
# NOTE(review): parameter `file` shadows the Python 2 builtin; kept as-is to
# preserve the public keyword interface.
def upload(file, project_name, user_name, password, summary, labels=None):
    """Upload a file to a Google Code project's file server.

    Args:
      file: The local path to the file.
      project_name: The name of your project on Google Code.
      user_name: Your Google account name.
      password: The googlecode.com password for your account.
                Note that this is NOT your global Google Account password!
      summary: A small description for the file.
      labels: an optional list of label strings with which to tag the file.

    Returns: a tuple:
      http_status: 201 if the upload succeeded, something else if an
                   error occurred.
      http_reason: The human-readable string associated with http_status
      file_url: If the upload succeeded, the URL of the file on Google
                Code, None otherwise.
    """
    # The login is the user part of user@gmail.com. If the login provided
    # is in the full user@domain form, strip it down.
    if user_name.endswith('@gmail.com'):
        user_name = user_name[:user_name.index('@gmail.com')]

    form_fields = [('summary', summary)]
    if labels is not None:
        form_fields.extend([('label', l.strip()) for l in labels])

    content_type, body = encode_upload_request(form_fields, file)

    upload_host = '%s.googlecode.com' % project_name
    upload_uri = '/files'
    # HTTP Basic auth against the project's upload endpoint.
    auth_token = base64.b64encode('%s:%s'% (user_name, password))
    headers = {
        'Authorization': 'Basic %s' % auth_token,
        'User-Agent': 'Googlecode.com uploader v0.9.4',
        'Content-Type': content_type,
    }

    server = httplib.HTTPSConnection(upload_host)
    server.request('POST', upload_uri, body, headers)
    resp = server.getresponse()
    server.close()

    # 201 Created carries the new file's URL in the Location header.
    if resp.status == 201:
        location = resp.getheader('Location', None)
    else:
        location = None
    return resp.status, resp.reason, location
def encode_upload_request(fields, file_path):
    """Encode the given fields and file into a multipart form body.

    fields is a sequence of (name, value) pairs. file is the path of
    the file to upload. The file will be uploaded to Google Code with
    the same file name.

    Returns: (content_type, body) ready for httplib.HTTP instance

    NOTE(review): this is Python 2 code -- file_content is a py2 str, so
    joining it with the text parts works; under Python 3 the bytes/str mix
    would fail.
    """
    BOUNDARY = '----------Googlecode_boundary_reindeer_flotilla'
    CRLF = '\r\n'

    body = []

    # Add the metadata about the upload first
    for key, value in fields:
        body.extend(
            ['--' + BOUNDARY,
             'Content-Disposition: form-data; name="%s"' % key,
             '',
             value,
             ])

    # Now add the file itself
    file_name = os.path.basename(file_path)
    f = open(file_path, 'rb')
    file_content = f.read()
    f.close()

    body.extend(
        ['--' + BOUNDARY,
         'Content-Disposition: form-data; name="filename"; filename="%s"'
         % file_name,
         # The upload server determines the mime-type, no need to set it.
         'Content-Type: application/octet-stream',
         '',
         file_content,
         ])

    # Finalize the form body
    body.extend(['--' + BOUNDARY + '--', ''])

    return 'multipart/form-data; boundary=%s' % BOUNDARY, CRLF.join(body)
def upload_find_auth(file_path, project_name, summary, labels=None,
                     user_name=None, password=None, tries=3):
    """Find credentials and upload a file to a Google Code project's file server.

    file_path, project_name, summary, and labels are passed as-is to upload.

    Args:
      file_path: The local path to the file.
      project_name: The name of your project on Google Code.
      summary: A small description for the file.
      labels: an optional list of label strings with which to tag the file.
      user_name: Your Google account name; prompted for if not given.
      password: Your googlecode.com password; prompted for if not given.
      tries: How many attempts to make.
    """
    while tries > 0:
        if user_name is None:
            # Read username if not specified or loaded from svn config, or on
            # subsequent tries.
            sys.stdout.write('Please enter your googlecode.com username: ')
            sys.stdout.flush()
            user_name = sys.stdin.readline().rstrip()
        if password is None:
            # Read password if not loaded from svn config, or on subsequent tries.
            print 'Please enter your googlecode.com password.'
            print '** Note that this is NOT your Gmail account password! **'
            print 'It is the password you use to access Subversion repositories,'
            print 'and can be found here: http://code.google.com/hosting/settings'
            password = getpass.getpass()

        status, reason, url = upload(file_path, project_name, user_name, password,
                                     summary, labels)
        # Returns 403 Forbidden instead of 401 Unauthorized for bad
        # credentials as of 2007-07-17.
        if status in [httplib.FORBIDDEN, httplib.UNAUTHORIZED]:
            # Rest for another try.
            user_name = password = None
            tries = tries - 1
        else:
            # We're done.
            break

    return status, reason, url
def main():
    """Parse command-line options and perform the upload.

    Returns 0 on success, 1 on failure (used as the process exit status).
    """
    parser = optparse.OptionParser(usage='googlecode-upload.py -s SUMMARY '
                                   '-p PROJECT [options] FILE')
    parser.add_option('-s', '--summary', dest='summary',
                      help='Short description of the file')
    parser.add_option('-p', '--project', dest='project',
                      help='Google Code project name')
    parser.add_option('-u', '--user', dest='user',
                      help='Your Google Code username')
    parser.add_option('-w', '--password', dest='password',
                      help='Your Google Code password')
    parser.add_option('-l', '--labels', dest='labels',
                      help='An optional list of comma-separated labels to attach '
                      'to the file')

    options, args = parser.parse_args()

    # Mandatory arguments: summary, project, and exactly one file.
    if not options.summary:
        parser.error('File summary is missing.')
    elif not options.project:
        parser.error('Project name is missing.')
    elif len(args) < 1:
        parser.error('File to upload not provided.')
    elif len(args) > 1:
        parser.error('Only one file may be specified.')

    file_path = args[0]

    if options.labels:
        labels = options.labels.split(',')
    else:
        labels = None

    status, reason, url = upload_find_auth(file_path, options.project,
                                           options.summary, labels,
                                           options.user, options.password)
    if url:
        print 'The file was uploaded successfully.'
        print 'URL: %s' % url
        return 0
    else:
        print 'An error occurred. Your file was not uploaded.'
        print 'Google Code upload server said: %s (%s)' % (reason, status)
        return 1
# Script entry point; exit status reflects upload success (0) or failure (1).
if __name__ == '__main__':
    sys.exit(main())
import os
import json
import getshorty
import unittest
import tempfile
# Fixture data shared by the tests below.
URL = 'http://google.com'         # default redirect target
URL_MOBILE = 'http://facebook.com'  # target served to mobile user agents
URL_TABLET = 'https://yahoo.com'    # target served to tablet user agents
BAD_URL = 'http//google.com'      # deliberately malformed (missing ':')
# Desktop, mobile (iPhone), and tablet (iPad) User-Agent strings.
UA = 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/57.0.2987.133 Safari/537.36'
UA_MOBILE = 'Mozilla/5.0 (iPhone; CPU iPhone OS 6_0 like Mac OS X) AppleWebKit/536.26 (KHTML, like Gecko) Version/6.0 Mobile/10A5376e Safari/8536.25'
UA_TABLET = 'Mozilla/5.0(iPad; U; CPU iPhone OS 3_2 like Mac OS X; en-us) AppleWebKit/531.21.10 (KHTML, like Gecko) Version/4.0.4 Mobile/7B314 Safari/531.21.10'
class GetShortyTestCase(unittest.TestCase):
    """Flask test-client tests for the getshorty URL-shortener API."""

    def setUp(self):
        # Point the app at a throwaway temp-file database for each test.
        self.db_fd, getshorty.app.config['DATABASE'] = tempfile.mkstemp()
        getshorty.app.config['TESTING'] = True
        self.app = getshorty.app.test_client()
        with getshorty.app.app_context():
            getshorty.init_db()

    def tearDown(self):
        os.close(self.db_fd)
        os.unlink(getshorty.app.config['DATABASE'])

    def test_empty_db(self):
        # A fresh database lists no links.
        resp = self.app.get('/api/1.0/list')
        assert b'[]' in resp.data

    def test_bad_methods(self):
        # Wrong HTTP verbs must yield 405 Method Not Allowed.
        resp = self.app.post('/api/1.0/list')
        resp2 = self.app.get('/api/1.0/create')
        assert resp.status_code == 405
        assert resp2.status_code == 405

    def test_create_empty(self):
        resp = self.app.post('/api/1.0/create', data='{}')
        assert b'"url parameter is mandatory' in resp.data

    def test_create_badurl(self):
        resp = self.app.post(
            '/api/1.0/create', data=json.dumps(dict(url=BAD_URL)))
        assert b'"invalid url"' in resp.data

    def test_create_single(self):
        # Creating a link returns 201 plus a short URL that redirects (302).
        resp = self.app.post(
            '/api/1.0/create', data=json.dumps(dict(url=URL)))
        assert resp.status_code == 201
        short_url = json.loads(resp.data.decode('utf-8'))['shorten']
        resp2 = self.app.get(short_url)
        assert resp2.status_code == 302

    def test_create_complete(self):
        # Create a link with per-device targets, then verify that each
        # user-agent class is redirected to its matching URL.
        resp = self.app.post(
            '/api/1.0/create', data='{"url":"%s","url-mobile":"%s","url-tablet":"%s"}' % (URL, URL_MOBILE, URL_TABLET))
        assert resp.status_code == 201
        short_url = json.loads(resp.data.decode('utf-8'))['shorten']
        resp_default = self.app.get(short_url, environ_base={
            'HTTP_USER_AGENT': UA})
        assert resp_default.status_code == 302
        assert resp_default.location == URL
        resp_mobile = self.app.get(short_url, environ_base={
            'HTTP_USER_AGENT': UA_MOBILE})
        assert resp_mobile.status_code == 302
        assert resp_mobile.location == URL_MOBILE
        resp_tablet = self.app.get(short_url, environ_base={
            'HTTP_USER_AGENT': UA_TABLET})
        assert resp_tablet.status_code == 302
        assert resp_tablet.location == URL_TABLET
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
# -*- coding: utf-8 -*-
# MinIO Python Library for Amazon S3 Compatible Cloud Storage,
# (C) 2015-2020 MinIO, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from unittest import TestCase
import mock
from nose.tools import eq_, timed
from minio import Minio
from minio.api import _DEFAULT_USER_AGENT
from .minio_mocks import MockConnection, MockResponse
class ListObjectsV1Test(TestCase):
    """Tests for Minio.list_objects() against the legacy V1 ListObjects API,
    with urllib3's PoolManager replaced by a scripted MockConnection."""

    @mock.patch('urllib3.PoolManager')
    def test_empty_list_objects_works(self, mock_connection):
        # A result set with no <Contents> entries must yield an empty iterator.
        mock_data = '''<?xml version="1.0"?>
<ListBucketResult xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
<Name>bucket</Name>
<Prefix/>
<Marker/>
<IsTruncated>false</IsTruncated>
<MaxKeys>1000</MaxKeys>
<Delimiter/>
</ListBucketResult>'''
        mock_server = MockConnection()
        mock_connection.return_value = mock_server
        mock_server.mock_add_request(
            MockResponse(
                "GET",
                "https://localhost:9000/bucket?delimiter=&encoding-type=url"
                "&max-keys=1000&prefix=",
                {"User-Agent": _DEFAULT_USER_AGENT},
                200,
                content=mock_data.encode(),
            ),
        )
        client = Minio('localhost:9000')
        bucket_iter = client.list_objects(
            'bucket', recursive=True, use_api_v1=True,
        )
        buckets = []
        for bucket in bucket_iter:
            buckets.append(bucket)
        eq_(0, len(buckets))

    # @timed guards against the iterator looping forever on a bad response.
    @timed(1)
    @mock.patch('urllib3.PoolManager')
    def test_list_objects_works(self, mock_connection):
        # Single non-truncated page with two objects.
        mock_data = '''<?xml version="1.0"?>
<ListBucketResult xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
<Name>bucket</Name>
<Prefix/>
<Marker/>
<MaxKeys>1000</MaxKeys>
<Delimiter/>
<IsTruncated>false</IsTruncated>
<Contents>
<Key>key1</Key>
<LastModified>2015-05-05T02:21:15.716Z</LastModified>
<ETag>5eb63bbbe01eeed093cb22bb8f5acdc3</ETag>
<Size>11</Size>
<StorageClass>STANDARD</StorageClass>
<Owner>
<ID>minio</ID>
<DisplayName>minio</DisplayName>
</Owner>
</Contents>
<Contents>
<Key>key2</Key>
<LastModified>2015-05-05T20:36:17.498Z</LastModified>
<ETag>2a60eaffa7a82804bdc682ce1df6c2d4</ETag>
<Size>1661</Size>
<StorageClass>STANDARD</StorageClass>
<Owner>
<ID>minio</ID>
<DisplayName>minio</DisplayName>
</Owner>
</Contents>
</ListBucketResult>'''
        mock_server = MockConnection()
        mock_connection.return_value = mock_server
        mock_server.mock_add_request(
            MockResponse(
                "GET",
                "https://localhost:9000/bucket?delimiter=%2F&encoding-type=url"
                "&max-keys=1000&prefix=",
                {"User-Agent": _DEFAULT_USER_AGENT},
                200,
                content=mock_data.encode(),
            ),
        )
        client = Minio('localhost:9000')
        bucket_iter = client.list_objects('bucket', use_api_v1=True)
        buckets = []
        for bucket in bucket_iter:
            # cause an xml exception and fail if we try retrieving again
            mock_server.mock_add_request(
                MockResponse(
                    "GET",
                    "https://localhost:9000/bucket?delimiter=%2F&encoding-type=url"
                    "&max-keys=1000&prefix=",
                    {"User-Agent": _DEFAULT_USER_AGENT},
                    200,
                    content=b"",
                ),
            )
            buckets.append(bucket)
        eq_(2, len(buckets))

    @timed(1)
    @mock.patch('urllib3.PoolManager')
    def test_list_objects_works_well(self, mock_connection):
        # Truncated first page (NextMarker set) followed by a final page;
        # the client must follow the marker and return all four objects.
        mock_data1 = '''<?xml version="1.0"?>
<ListBucketResult xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
<Name>bucket</Name>
<Prefix/>
<Marker />
<NextMarker>marker</NextMarker>
<MaxKeys>1000</MaxKeys>
<Delimiter/>
<IsTruncated>true</IsTruncated>
<Contents>
<Key>key1</Key>
<LastModified>2015-05-05T02:21:15.716Z</LastModified>
<ETag>5eb63bbbe01eeed093cb22bb8f5acdc3</ETag>
<Size>11</Size>
<StorageClass>STANDARD</StorageClass>
<Owner>
<ID>minio</ID>
<DisplayName>minio</DisplayName>
</Owner>
</Contents>
<Contents>
<Key>key2</Key>
<LastModified>2015-05-05T20:36:17.498Z</LastModified>
<ETag>2a60eaffa7a82804bdc682ce1df6c2d4</ETag>
<Size>1661</Size>
<StorageClass>STANDARD</StorageClass>
<Owner>
<ID>minio</ID>
<DisplayName>minio</DisplayName>
</Owner>
</Contents>
</ListBucketResult>'''
        mock_data2 = '''<?xml version="1.0"?>
<ListBucketResult xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
<Name>bucket</Name>
<Prefix/>
<Marker/>
<MaxKeys>1000</MaxKeys>
<Delimiter/>
<IsTruncated>false</IsTruncated>
<Contents>
<Key>key3</Key>
<LastModified>2015-05-05T02:21:15.716Z</LastModified>
<ETag>5eb63bbbe01eeed093cb22bb8f5acdc3</ETag>
<Size>11</Size>
<StorageClass>STANDARD</StorageClass>
<Owner>
<ID>minio</ID>
<DisplayName>minio</DisplayName>
</Owner>
</Contents>
<Contents>
<Key>key4</Key>
<LastModified>2015-05-05T20:36:17.498Z</LastModified>
<ETag>2a60eaffa7a82804bdc682ce1df6c2d4</ETag>
<Size>1661</Size>
<StorageClass>STANDARD</StorageClass>
<Owner>
<ID>minio</ID>
<DisplayName>minio</DisplayName>
</Owner>
</Contents>
</ListBucketResult>'''
        mock_server = MockConnection()
        mock_connection.return_value = mock_server
        mock_server.mock_add_request(
            MockResponse(
                "GET",
                "https://localhost:9000/bucket?delimiter=&encoding-type=url"
                "&max-keys=1000&prefix=",
                {"User-Agent": _DEFAULT_USER_AGENT},
                200,
                content=mock_data1.encode(),
            ),
        )
        client = Minio('localhost:9000')
        bucket_iter = client.list_objects(
            'bucket', recursive=True, use_api_v1=True,
        )
        buckets = []
        for bucket in bucket_iter:
            # Queue the continuation response (marker=marker) for the
            # follow-up request triggered by the truncated first page.
            mock_server.mock_add_request(
                MockResponse(
                    "GET",
                    "https://localhost:9000/bucket?delimiter=&encoding-type=url"
                    "&marker=marker&max-keys=1000&prefix=",
                    {"User-Agent": _DEFAULT_USER_AGENT},
                    200,
                    content=mock_data2.encode(),
                ),
            )
            buckets.append(bucket)
        eq_(4, len(buckets))
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* Provides pluggable interface for enforcing client quotas from a Kafka server.
*/
package org.apache.kafka.server.quota; | java | github | https://github.com/apache/kafka | clients/src/main/java/org/apache/kafka/server/quota/package-info.java |
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.core.urlresolvers import reverse
from django import http
from mox3.mox import IsA # noqa
from openstack_dashboard.api import cinder
from openstack_dashboard.test import helpers as test
from openstack_dashboard.dashboards.admin.volumes.snapshots import forms
# Admin volumes index page; used as the post-action redirect target below.
INDEX_URL = reverse('horizon:admin:volumes:volumes_tab')
class VolumeViewTests(test.BaseAdminViewTests):
    """Admin-dashboard volume view tests (update status, manage, unmanage,
    migrate) with the cinder API layer stubbed out via mox."""

    @test.create_stubs({cinder: ('volume_reset_state',
                                 'volume_get')})
    def test_update_volume_status(self):
        volume = self.volumes.first()
        formData = {'status': 'error'}

        cinder.volume_get(IsA(http.HttpRequest), volume.id).AndReturn(volume)
        cinder.volume_reset_state(IsA(http.HttpRequest),
                                  volume.id,
                                  formData['status'])
        self.mox.ReplayAll()

        res = self.client.post(
            reverse('horizon:admin:volumes:volumes:update_status',
                    args=(volume.id,)),
            formData)
        self.assertNoFormErrors(res)

    @test.create_stubs({cinder: ('volume_manage',
                                 'volume_type_list',
                                 'availability_zone_list',
                                 'extension_supported')})
    def test_manage_volume(self):
        metadata = {'key': u'k1',
                    'value': u'v1'}
        formData = {'host': 'host-1',
                    'identifier': 'vol-1',
                    'id_type': u'source-name',
                    'name': 'name-1',
                    'description': 'manage a volume',
                    'volume_type': 'vol_type_1',
                    'availability_zone': 'nova',
                    # The "key=value" form input is parsed into a metadata
                    # dict before reaching the API (asserted below).
                    'metadata': metadata['key'] + '=' + metadata['value'],
                    'bootable': False}
        cinder.volume_type_list(
            IsA(http.HttpRequest)).\
            AndReturn(self.volume_types.list())
        cinder.availability_zone_list(
            IsA(http.HttpRequest)).\
            AndReturn(self.availability_zones.list())
        cinder.extension_supported(
            IsA(http.HttpRequest),
            'AvailabilityZones').\
            AndReturn(True)
        cinder.volume_manage(
            IsA(http.HttpRequest),
            host=formData['host'],
            identifier=formData['identifier'],
            id_type=formData['id_type'],
            name=formData['name'],
            description=formData['description'],
            volume_type=formData['volume_type'],
            availability_zone=formData['availability_zone'],
            metadata={metadata['key']: metadata['value']},
            bootable=formData['bootable'])
        self.mox.ReplayAll()
        res = self.client.post(
            reverse('horizon:admin:volumes:volumes:manage'),
            formData)
        self.assertNoFormErrors(res)

    @test.create_stubs({cinder: ('volume_unmanage',
                                 'volume_get')})
    def test_unmanage_volume(self):
        # important - need to get the v2 cinder volume which has host data
        # NOTE(review): subscripting filter() only works on Python 2; this
        # would need list(filter(...)) under Python 3.
        volume_list = \
            filter(lambda x: x.name == 'v2_volume', self.cinder_volumes.list())
        volume = volume_list[0]
        formData = {'volume_name': volume.name,
                    'host_name': 'host@backend-name#pool',
                    'volume_id': volume.id}
        cinder.volume_get(IsA(http.HttpRequest), volume.id).AndReturn(volume)
        cinder.volume_unmanage(IsA(http.HttpRequest), volume.id).\
            AndReturn(volume)
        self.mox.ReplayAll()
        res = self.client.post(
            reverse('horizon:admin:volumes:volumes:unmanage',
                    args=(volume.id,)),
            formData)
        self.assertNoFormErrors(res)

    @test.create_stubs({cinder: ('pool_list',
                                 'volume_get',)})
    def test_volume_migrate_get(self):
        volume = self.cinder_volumes.get(name='v2_volume')
        cinder.volume_get(IsA(http.HttpRequest), volume.id) \
            .AndReturn(volume)
        cinder.pool_list(IsA(http.HttpRequest)) \
            .AndReturn(self.pools.list())
        self.mox.ReplayAll()
        url = reverse('horizon:admin:volumes:volumes:migrate',
                      args=[volume.id])
        res = self.client.get(url)
        self.assertTemplateUsed(res,
                                'admin/volumes/volumes/migrate_volume.html')

    @test.create_stubs({cinder: ('volume_get',)})
    def test_volume_migrate_get_volume_get_exception(self):
        volume = self.cinder_volumes.get(name='v2_volume')
        cinder.volume_get(IsA(http.HttpRequest), volume.id) \
            .AndRaise(self.exceptions.cinder)
        self.mox.ReplayAll()
        url = reverse('horizon:admin:volumes:volumes:migrate',
                      args=[volume.id])
        res = self.client.get(url)
        # API failure sends the user back to the volumes index.
        self.assertRedirectsNoFollow(res, INDEX_URL)

    @test.create_stubs({cinder: ('pool_list',
                                 'volume_get',)})
    def test_volume_migrate_list_pool_get_exception(self):
        volume = self.cinder_volumes.get(name='v2_volume')
        cinder.volume_get(IsA(http.HttpRequest), volume.id) \
            .AndReturn(volume)
        cinder.pool_list(IsA(http.HttpRequest)) \
            .AndRaise(self.exceptions.cinder)
        self.mox.ReplayAll()
        url = reverse('horizon:admin:volumes:volumes:migrate',
                      args=[volume.id])
        res = self.client.get(url)
        self.assertRedirectsNoFollow(res, INDEX_URL)

    @test.create_stubs({cinder: ('pool_list',
                                 'volume_get',
                                 'volume_migrate',)})
    def test_volume_migrate_post(self):
        volume = self.cinder_volumes.get(name='v2_volume')
        host = self.pools.first().name
        cinder.volume_get(IsA(http.HttpRequest), volume.id) \
            .AndReturn(volume)
        cinder.pool_list(IsA(http.HttpRequest)) \
            .AndReturn(self.pools.list())
        cinder.volume_migrate(IsA(http.HttpRequest),
                              volume.id,
                              host,
                              False) \
            .AndReturn(None)
        self.mox.ReplayAll()
        url = reverse('horizon:admin:volumes:volumes:migrate',
                      args=[volume.id])
        res = self.client.post(url, {'host': host, 'volume_id': volume.id})
        self.assertNoFormErrors(res)
        self.assertRedirectsNoFollow(res, INDEX_URL)

    @test.create_stubs({cinder: ('pool_list',
                                 'volume_get',
                                 'volume_migrate',)})
    def test_volume_migrate_post_api_exception(self):
        volume = self.cinder_volumes.get(name='v2_volume')
        host = self.pools.first().name
        cinder.volume_get(IsA(http.HttpRequest), volume.id) \
            .AndReturn(volume)
        cinder.pool_list(IsA(http.HttpRequest)) \
            .AndReturn(self.pools.list())
        cinder.volume_migrate(IsA(http.HttpRequest),
                              volume.id,
                              host,
                              False) \
            .AndRaise(self.exceptions.cinder)
        self.mox.ReplayAll()
        url = reverse('horizon:admin:volumes:volumes:migrate',
                      args=[volume.id])
        res = self.client.post(url, {'host': host, 'volume_id': volume.id})
        self.assertRedirectsNoFollow(res, INDEX_URL)

    def test_get_volume_status_choices_without_current(self):
        # The volume's current status must not be offered as a choice.
        current_status = {'status': 'available'}
        status_choices = forms.populate_status_choices(current_status,
                                                       forms.STATUS_CHOICES)
        self.assertEqual(len(status_choices), len(forms.STATUS_CHOICES))
        self.assertNotIn(current_status['status'],
                         [status[0] for status in status_choices])

    @test.create_stubs({cinder: ('volume_get',)})
    def test_update_volume_status_get(self):
        volume = self.cinder_volumes.get(name='v2_volume')
        cinder.volume_get(IsA(http.HttpRequest), volume.id) \
            .AndReturn(volume)
        self.mox.ReplayAll()
        url = reverse('horizon:admin:volumes:volumes:update_status',
                      args=[volume.id])
        res = self.client.get(url)
        # The rendered form must not offer the volume's current status.
        status_option = "<option value=\"%s\"></option>" % volume.status
        self.assertNotContains(res, status_option)
import { Content } from "@prismicio/client";
import PostPreview from "../components/post-preview";
// Props for <MoreStories />: the Prismic post documents to preview.
type MoreStoriesProps = {
  posts: Content.PostDocument[];
};
/**
 * Renders the "More Stories" section: a heading followed by a responsive
 * grid of <PostPreview /> cards, one per Prismic post document.
 */
export default function MoreStories({ posts }: MoreStoriesProps) {
  const previews = posts.map((post) => {
    const { title, cover_image, date, author, excerpt } = post.data;
    return (
      <PostPreview
        key={post.uid}
        href={post.url}
        title={title}
        coverImage={cover_image}
        date={date}
        author={author}
        excerpt={excerpt}
      />
    );
  });

  return (
    <section>
      <h2 className="mb-8 text-6xl md:text-7xl font-bold tracking-tighter leading-tight">
        More Stories
      </h2>
      <div className="grid grid-cols-1 md:grid-cols-2 md:gap-x-16 lg:gap-x-32 gap-y-20 md:gap-y-32 mb-32">
        {previews}
      </div>
    </section>
  );
}
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import datetime
import itertools
import os
import re
from importlib import import_module
from django.apps import apps
from django.conf import settings
from django.contrib.admin.models import LogEntry
from django.contrib.auth import REDIRECT_FIELD_NAME, SESSION_KEY
from django.contrib.auth.forms import (
AuthenticationForm, PasswordChangeForm, SetPasswordForm,
)
from django.contrib.auth.models import User
from django.contrib.auth.tests.custom_user import CustomUser
from django.contrib.auth.views import login as login_view, redirect_to_login
from django.contrib.sessions.middleware import SessionMiddleware
from django.contrib.sites.requests import RequestSite
from django.core import mail
from django.core.urlresolvers import NoReverseMatch, reverse, reverse_lazy
from django.db import connection
from django.http import HttpRequest, QueryDict
from django.middleware.csrf import CsrfViewMiddleware, get_token
from django.test import (
TestCase, ignore_warnings, modify_settings, override_settings,
)
from django.test.utils import patch_logger
from django.utils.deprecation import RemovedInDjango110Warning
from django.utils.encoding import force_text
from django.utils.http import urlquote
from django.utils.six.moves.urllib.parse import ParseResult, urlparse
from django.utils.translation import LANGUAGE_SESSION_KEY
from .models import UUIDUser
from .settings import AUTH_TEMPLATES
@override_settings(
    LANGUAGES=[
        ('en', 'English'),
    ],
    LANGUAGE_CODE='en',
    TEMPLATES=AUTH_TEMPLATES,
    USE_TZ=False,
    PASSWORD_HASHERS=['django.contrib.auth.hashers.SHA1PasswordHasher'],
    ROOT_URLCONF='auth_tests.urls',
)
class AuthViewsTestCase(TestCase):
    """
    Helper base class for all the follow test cases.

    Provides shared fixture users plus login/logout helpers and two custom
    assertions used throughout the module.
    """
    @classmethod
    def setUpTestData(cls):
        # Class-level (read-only) fixture users. The SHA1 hash below is the
        # hash of the literal string "password" under SHA1PasswordHasher.
        # u1: the standard active user most tests log in as.
        cls.u1 = User.objects.create(
            password='sha1$6efc0$f93efe9fd7542f25a7be94871ea45aa95de57161',
            last_login=datetime.datetime(2006, 12, 17, 7, 3, 31), is_superuser=False, username='testclient',
            first_name='Test', last_name='Client', email='testclient@example.com', is_staff=False, is_active=True,
            date_joined=datetime.datetime(2006, 12, 17, 7, 3, 31)
        )
        # u2: an inactive account (is_active=False).
        cls.u2 = User.objects.create(
            password='sha1$6efc0$f93efe9fd7542f25a7be94871ea45aa95de57161',
            last_login=datetime.datetime(2006, 12, 17, 7, 3, 31), is_superuser=False, username='inactive',
            first_name='Inactive', last_name='User', email='testclient2@example.com', is_staff=False, is_active=False,
            date_joined=datetime.datetime(2006, 12, 17, 7, 3, 31)
        )
        # u3: a staff member; used by the password-reset tests.
        cls.u3 = User.objects.create(
            password='sha1$6efc0$f93efe9fd7542f25a7be94871ea45aa95de57161',
            last_login=datetime.datetime(2006, 12, 17, 7, 3, 31), is_superuser=False, username='staff',
            first_name='Staff', last_name='Member', email='staffmember@example.com', is_staff=True, is_active=True,
            date_joined=datetime.datetime(2006, 12, 17, 7, 3, 31)
        )
        # u4: an empty password string.
        cls.u4 = User.objects.create(
            password='', last_login=datetime.datetime(2006, 12, 17, 7, 3, 31), is_superuser=False,
            username='empty_password', first_name='Empty', last_name='Password', email='empty_password@example.com',
            is_staff=False, is_active=True, date_joined=datetime.datetime(2006, 12, 17, 7, 3, 31)
        )
        # u5: a password value no hasher can manage ('$').
        cls.u5 = User.objects.create(
            password='$', last_login=datetime.datetime(2006, 12, 17, 7, 3, 31), is_superuser=False,
            username='unmanageable_password', first_name='Unmanageable', last_name='Password',
            email='unmanageable_password@example.com', is_staff=False, is_active=True,
            date_joined=datetime.datetime(2006, 12, 17, 7, 3, 31)
        )
        # u6: a password in an unknown hash format ('foo$bar').
        cls.u6 = User.objects.create(
            password='foo$bar', last_login=datetime.datetime(2006, 12, 17, 7, 3, 31), is_superuser=False,
            username='unknown_password', first_name='Unknown', last_name='Password',
            email='unknown_password@example.com', is_staff=False, is_active=True,
            date_joined=datetime.datetime(2006, 12, 17, 7, 3, 31)
        )
    def login(self, username='testclient', password='password'):
        """POST credentials to the login view and assert a session was created."""
        response = self.client.post('/login/', {
            'username': username,
            'password': password,
        })
        self.assertIn(SESSION_KEY, self.client.session)
        return response
    def logout(self):
        """GET the admin logout view and assert the session is cleared."""
        response = self.client.get('/admin/logout/')
        self.assertEqual(response.status_code, 200)
        self.assertNotIn(SESSION_KEY, self.client.session)
    def assertFormError(self, response, error):
        """Assert that error is found in response.context['form'] errors"""
        # Flatten the per-field error lists into one list before searching.
        form_errors = list(itertools.chain(*response.context['form'].errors.values()))
        self.assertIn(force_text(error), form_errors)
    def assertURLEqual(self, url, expected, parse_qs=False):
        """
        Given two URLs, make sure all their components (the ones given by
        urlparse) are equal, only comparing components that are present in both
        URLs.
        If `parse_qs` is True, then the querystrings are parsed with QueryDict.
        This is useful if you don't want the order of parameters to matter.
        Otherwise, the query strings are compared as-is.
        """
        fields = ParseResult._fields
        for attr, x, y in zip(fields, urlparse(url), urlparse(expected)):
            if parse_qs and attr == 'query':
                x, y = QueryDict(x), QueryDict(y)
            # Only fail when both sides have the component and they differ.
            if x and y and x != y:
                self.fail("%r != %r (%s doesn't match)" % (url, expected, attr))
@override_settings(ROOT_URLCONF='django.contrib.auth.urls')
class AuthViewNamedURLTests(AuthViewsTestCase):
    def test_named_urls(self):
        """Every URL name shipped by contrib.auth must be reversible."""
        cases = (
            ('login', [], {}),
            ('logout', [], {}),
            ('password_change', [], {}),
            ('password_change_done', [], {}),
            ('password_reset', [], {}),
            ('password_reset_done', [], {}),
            ('password_reset_confirm', [], {'uidb64': 'aaaaaaa', 'token': '1111-aaaaa'}),
            ('password_reset_complete', [], {}),
        )
        for url_name, url_args, url_kwargs in cases:
            try:
                reverse(url_name, args=url_args, kwargs=url_kwargs)
            except NoReverseMatch:
                self.fail("Reversal of url named '%s' failed with NoReverseMatch" % url_name)
class PasswordResetTest(AuthViewsTestCase):
    """Tests for the password reset flow (request -> email -> confirm)."""
    def test_email_not_found(self):
        """If the provided email is not registered, don't raise any error but
        also don't send any email."""
        response = self.client.get('/password_reset/')
        self.assertEqual(response.status_code, 200)
        response = self.client.post('/password_reset/', {'email': 'not_a_real_email@email.com'})
        self.assertEqual(response.status_code, 302)
        self.assertEqual(len(mail.outbox), 0)
    def test_email_found(self):
        "Email is sent if a valid email address is provided for password reset"
        response = self.client.post('/password_reset/', {'email': 'staffmember@example.com'})
        self.assertEqual(response.status_code, 302)
        self.assertEqual(len(mail.outbox), 1)
        self.assertIn("http://", mail.outbox[0].body)
        self.assertEqual(settings.DEFAULT_FROM_EMAIL, mail.outbox[0].from_email)
        # optional multipart text/html email has been added. Make sure original,
        # default functionality is 100% the same
        self.assertFalse(mail.outbox[0].message().is_multipart())
    def test_extra_email_context(self):
        """
        extra_email_context should be available in the email template context.
        """
        response = self.client.post(
            '/password_reset_extra_email_context/',
            {'email': 'staffmember@example.com'},
        )
        self.assertEqual(response.status_code, 302)
        self.assertEqual(len(mail.outbox), 1)
        self.assertIn('Email email context: "Hello!"', mail.outbox[0].body)
    def test_html_mail_template(self):
        """
        A multipart email with text/plain and text/html is sent
        if the html_email_template parameter is passed to the view
        """
        response = self.client.post('/password_reset/html_email_template/', {'email': 'staffmember@example.com'})
        self.assertEqual(response.status_code, 302)
        self.assertEqual(len(mail.outbox), 1)
        message = mail.outbox[0].message()
        self.assertEqual(len(message.get_payload()), 2)
        self.assertTrue(message.is_multipart())
        self.assertEqual(message.get_payload(0).get_content_type(), 'text/plain')
        self.assertEqual(message.get_payload(1).get_content_type(), 'text/html')
        # The HTML markup must only appear in the text/html alternative.
        self.assertNotIn('<html>', message.get_payload(0).get_payload())
        self.assertIn('<html>', message.get_payload(1).get_payload())
    def test_email_found_custom_from(self):
        "Email is sent if a valid email address is provided for password reset when a custom from_email is provided."
        response = self.client.post('/password_reset_from_email/', {'email': 'staffmember@example.com'})
        self.assertEqual(response.status_code, 302)
        self.assertEqual(len(mail.outbox), 1)
        self.assertEqual("staffmember@example.com", mail.outbox[0].from_email)
    @ignore_warnings(category=RemovedInDjango110Warning)
    @override_settings(ALLOWED_HOSTS=['adminsite.com'])
    def test_admin_reset(self):
        "If the reset view is marked as being for admin, the HTTP_HOST header is used for a domain override."
        response = self.client.post('/admin_password_reset/',
            {'email': 'staffmember@example.com'},
            HTTP_HOST='adminsite.com'
        )
        self.assertEqual(response.status_code, 302)
        self.assertEqual(len(mail.outbox), 1)
        self.assertIn("http://adminsite.com", mail.outbox[0].body)
        self.assertEqual(settings.DEFAULT_FROM_EMAIL, mail.outbox[0].from_email)
    # Skip any 500 handler action (like sending more mail...)
    @override_settings(DEBUG_PROPAGATE_EXCEPTIONS=True)
    def test_poisoned_http_host(self):
        "Poisoned HTTP_HOST headers can't be used for reset emails"
        # This attack is based on the way browsers handle URLs. The colon
        # should be used to separate the port, but if the URL contains an @,
        # the colon is interpreted as part of a username for login purposes,
        # making 'evil.com' the request domain. Since HTTP_HOST is used to
        # produce a meaningful reset URL, we need to be certain that the
        # HTTP_HOST header isn't poisoned. This is done as a check when get_host()
        # is invoked, but we check here as a practical consequence.
        with patch_logger('django.security.DisallowedHost', 'error') as logger_calls:
            response = self.client.post(
                '/password_reset/',
                {'email': 'staffmember@example.com'},
                HTTP_HOST='www.example:dr.frankenstein@evil.tld'
            )
            self.assertEqual(response.status_code, 400)
            self.assertEqual(len(mail.outbox), 0)
            self.assertEqual(len(logger_calls), 1)
    # Skip any 500 handler action (like sending more mail...)
    @override_settings(DEBUG_PROPAGATE_EXCEPTIONS=True)
    def test_poisoned_http_host_admin_site(self):
        "Poisoned HTTP_HOST headers can't be used for reset emails on admin views"
        with patch_logger('django.security.DisallowedHost', 'error') as logger_calls:
            response = self.client.post(
                '/admin_password_reset/',
                {'email': 'staffmember@example.com'},
                HTTP_HOST='www.example:dr.frankenstein@evil.tld'
            )
            self.assertEqual(response.status_code, 400)
            self.assertEqual(len(mail.outbox), 0)
            self.assertEqual(len(logger_calls), 1)
    def _test_confirm_start(self):
        """Request a reset email and return (full_url, path) extracted from it."""
        # Start by creating the email
        self.client.post('/password_reset/', {'email': 'staffmember@example.com'})
        self.assertEqual(len(mail.outbox), 1)
        return self._read_signup_email(mail.outbox[0])
    def _read_signup_email(self, email):
        """Extract the reset confirmation URL (and its path) from the email body."""
        urlmatch = re.search(r"https?://[^/]*(/.*reset/\S*)", email.body)
        self.assertIsNotNone(urlmatch, "No URL found in sent email")
        return urlmatch.group(), urlmatch.groups()[0]
    def test_confirm_valid(self):
        url, path = self._test_confirm_start()
        response = self.client.get(path)
        # redirect to a 'complete' page:
        self.assertContains(response, "Please enter your new password")
    def test_confirm_invalid(self):
        url, path = self._test_confirm_start()
        # Let's munge the token in the path, but keep the same length,
        # in case the URLconf will reject a different length.
        path = path[:-5] + ("0" * 4) + path[-1]
        response = self.client.get(path)
        self.assertContains(response, "The password reset link was invalid")
    def test_confirm_invalid_user(self):
        # Ensure that we get a 200 response for a non-existent user, not a 404
        response = self.client.get('/reset/123456/1-1/')
        self.assertContains(response, "The password reset link was invalid")
    def test_confirm_overflow_user(self):
        # Ensure that we get a 200 response for a base36 user id that overflows int
        response = self.client.get('/reset/zzzzzzzzzzzzz/1-1/')
        self.assertContains(response, "The password reset link was invalid")
    def test_confirm_invalid_post(self):
        # Same as test_confirm_invalid, but trying
        # to do a POST instead.
        url, path = self._test_confirm_start()
        path = path[:-5] + ("0" * 4) + path[-1]
        self.client.post(path, {
            'new_password1': 'anewpassword',
            'new_password2': ' anewpassword',
        })
        # Check the password has not been changed
        u = User.objects.get(email='staffmember@example.com')
        # Idiomatic negative assertion (was: assertTrue(not ...)).
        self.assertFalse(u.check_password("anewpassword"))
    def test_confirm_complete(self):
        url, path = self._test_confirm_start()
        response = self.client.post(path, {'new_password1': 'anewpassword',
                                           'new_password2': 'anewpassword'})
        # Check the password has been changed
        u = User.objects.get(email='staffmember@example.com')
        self.assertTrue(u.check_password("anewpassword"))
        # Check we can't use the link again
        response = self.client.get(path)
        self.assertContains(response, "The password reset link was invalid")
    def test_confirm_different_passwords(self):
        url, path = self._test_confirm_start()
        response = self.client.post(path, {'new_password1': 'anewpassword',
                                           'new_password2': 'x'})
        self.assertFormError(response, SetPasswordForm.error_messages['password_mismatch'])
    def test_reset_redirect_default(self):
        response = self.client.post('/password_reset/',
            {'email': 'staffmember@example.com'})
        self.assertEqual(response.status_code, 302)
        self.assertURLEqual(response.url, '/password_reset/done/')
    def test_reset_custom_redirect(self):
        response = self.client.post('/password_reset/custom_redirect/',
            {'email': 'staffmember@example.com'})
        self.assertEqual(response.status_code, 302)
        self.assertURLEqual(response.url, '/custom/')
    def test_reset_custom_redirect_named(self):
        response = self.client.post('/password_reset/custom_redirect/named/',
            {'email': 'staffmember@example.com'})
        self.assertEqual(response.status_code, 302)
        self.assertURLEqual(response.url, '/password_reset/')
    def test_confirm_redirect_default(self):
        url, path = self._test_confirm_start()
        response = self.client.post(path, {'new_password1': 'anewpassword',
                                           'new_password2': 'anewpassword'})
        self.assertEqual(response.status_code, 302)
        self.assertURLEqual(response.url, '/reset/done/')
    def test_confirm_redirect_custom(self):
        url, path = self._test_confirm_start()
        path = path.replace('/reset/', '/reset/custom/')
        response = self.client.post(path, {'new_password1': 'anewpassword',
                                           'new_password2': 'anewpassword'})
        self.assertEqual(response.status_code, 302)
        self.assertURLEqual(response.url, '/custom/')
    def test_confirm_redirect_custom_named(self):
        url, path = self._test_confirm_start()
        path = path.replace('/reset/', '/reset/custom/named/')
        response = self.client.post(path, {'new_password1': 'anewpassword',
                                           'new_password2': 'anewpassword'})
        self.assertEqual(response.status_code, 302)
        self.assertURLEqual(response.url, '/password_reset/')
    def test_confirm_display_user_from_form(self):
        url, path = self._test_confirm_start()
        response = self.client.get(path)
        # #16919 -- The ``password_reset_confirm`` view should pass the user
        # object to the ``SetPasswordForm``, even on GET requests.
        # For this test, we render ``{{ form.user }}`` in the template
        # ``registration/password_reset_confirm.html`` so that we can test this.
        username = User.objects.get(email='staffmember@example.com').username
        self.assertContains(response, "Hello, %s." % username)
        # However, the view should NOT pass any user object on a form if the
        # password reset link was invalid.
        response = self.client.get('/reset/zzzzzzzzzzzzz/1-1/')
        self.assertContains(response, "Hello, .")
@override_settings(AUTH_USER_MODEL='auth.CustomUser')
class CustomUserPasswordResetTest(AuthViewsTestCase):
    """Password reset flow against a custom AUTH_USER_MODEL."""
    # Email address of the single fixture user created below.
    user_email = 'staffmember@example.com'
    @classmethod
    def setUpTestData(cls):
        # Single CustomUser fixture; the hash is SHA1("password").
        cls.u1 = CustomUser.custom_objects.create(
            password='sha1$6efc0$f93efe9fd7542f25a7be94871ea45aa95de57161',
            last_login=datetime.datetime(2006, 12, 17, 7, 3, 31), email='staffmember@example.com', is_active=True,
            is_admin=False, date_of_birth=datetime.date(1976, 11, 8)
        )
    def _test_confirm_start(self):
        """Request a reset email and return (full_url, path) extracted from it."""
        # Start by creating the email
        response = self.client.post('/password_reset/', {'email': self.user_email})
        self.assertEqual(response.status_code, 302)
        self.assertEqual(len(mail.outbox), 1)
        return self._read_signup_email(mail.outbox[0])
    def _read_signup_email(self, email):
        """Extract the reset confirmation URL (and its path) from the email body."""
        urlmatch = re.search(r"https?://[^/]*(/.*reset/\S*)", email.body)
        self.assertIsNotNone(urlmatch, "No URL found in sent email")
        return urlmatch.group(), urlmatch.groups()[0]
    def test_confirm_valid_custom_user(self):
        """The full confirm-and-set-password flow works for a custom user model."""
        url, path = self._test_confirm_start()
        response = self.client.get(path)
        # redirect to a 'complete' page:
        self.assertContains(response, "Please enter your new password")
        # then submit a new password
        response = self.client.post(path, {
            'new_password1': 'anewpassword',
            'new_password2': 'anewpassword',
        })
        self.assertRedirects(response, '/reset/done/')
@override_settings(AUTH_USER_MODEL='auth.UUIDUser')
class UUIDUserPasswordResetTest(CustomUserPasswordResetTest):
    """Rerun the custom-user reset tests with a UUID-keyed user model."""
    def _test_confirm_start(self):
        # Create the user on demand (rather than via a fixture), then let
        # the parent class drive the reset-email round trip.
        UUIDUser.objects.create_user(email=self.user_email, username='foo', password='foo')
        return super(UUIDUserPasswordResetTest, self)._test_confirm_start()
class ChangePasswordTest(AuthViewsTestCase):
    """Tests for the password_change view and its redirects."""
    def fail_login(self, password='password'):
        """Attempt a login that must fail with the invalid_login form error."""
        response = self.client.post('/login/', {
            'username': 'testclient',
            'password': password,
        })
        self.assertFormError(response, AuthenticationForm.error_messages['invalid_login'] % {
            'username': User._meta.get_field('username').verbose_name
        })
    def logout(self):
        # Overrides the base helper: uses the non-admin logout URL.
        self.client.get('/logout/')
    def test_password_change_fails_with_invalid_old_password(self):
        """A wrong old_password yields the password_incorrect form error."""
        self.login()
        response = self.client.post('/password_change/', {
            'old_password': 'donuts',
            'new_password1': 'password1',
            'new_password2': 'password1',
        })
        self.assertFormError(response, PasswordChangeForm.error_messages['password_incorrect'])
    def test_password_change_fails_with_mismatched_passwords(self):
        """Differing new passwords yield the password_mismatch form error."""
        self.login()
        response = self.client.post('/password_change/', {
            'old_password': 'password',
            'new_password1': 'password1',
            'new_password2': 'donuts',
        })
        self.assertFormError(response, SetPasswordForm.error_messages['password_mismatch'])
    def test_password_change_succeeds(self):
        """After a change, the old password no longer works and the new one does."""
        self.login()
        self.client.post('/password_change/', {
            'old_password': 'password',
            'new_password1': 'password1',
            'new_password2': 'password1',
        })
        self.fail_login()
        self.login(password='password1')
    def test_password_change_done_succeeds(self):
        """A successful change redirects to the 'done' page."""
        self.login()
        response = self.client.post('/password_change/', {
            'old_password': 'password',
            'new_password1': 'password1',
            'new_password2': 'password1',
        })
        self.assertEqual(response.status_code, 302)
        self.assertURLEqual(response.url, '/password_change/done/')
    @override_settings(LOGIN_URL='/login/')
    def test_password_change_done_fails(self):
        """The 'done' page requires authentication and redirects to login."""
        response = self.client.get('/password_change/done/')
        self.assertEqual(response.status_code, 302)
        self.assertURLEqual(response.url, '/login/?next=/password_change/done/')
    def test_password_change_redirect_default(self):
        """Default post-change redirect is /password_change/done/."""
        self.login()
        response = self.client.post('/password_change/', {
            'old_password': 'password',
            'new_password1': 'password1',
            'new_password2': 'password1',
        })
        self.assertEqual(response.status_code, 302)
        self.assertURLEqual(response.url, '/password_change/done/')
    def test_password_change_redirect_custom(self):
        """A custom post_change_redirect URL is honored."""
        self.login()
        response = self.client.post('/password_change/custom/', {
            'old_password': 'password',
            'new_password1': 'password1',
            'new_password2': 'password1',
        })
        self.assertEqual(response.status_code, 302)
        self.assertURLEqual(response.url, '/custom/')
    def test_password_change_redirect_custom_named(self):
        """A named URL given as post_change_redirect is resolved."""
        self.login()
        response = self.client.post('/password_change/custom/named/', {
            'old_password': 'password',
            'new_password1': 'password1',
            'new_password2': 'password1',
        })
        self.assertEqual(response.status_code, 302)
        self.assertURLEqual(response.url, '/password_reset/')
@modify_settings(MIDDLEWARE_CLASSES={
    'append': 'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
})
class SessionAuthenticationTests(AuthViewsTestCase):
    def test_user_password_change_updates_session(self):
        """
        #21649 - Ensure contrib.auth.views.password_change updates the user's
        session auth hash after a password change so the session isn't logged out.
        """
        self.login()
        change_data = {
            'old_password': 'password',
            'new_password1': 'password1',
            'new_password2': 'password1',
        }
        response = self.client.post('/password_change/', change_data)
        # A stale session hash would log the client out, making the
        # follow-up fetch of the redirect target fail.
        self.assertRedirects(response, '/password_change/done/')
class LoginTest(AuthViewsTestCase):
    """Tests for the login view: context, open-redirect protection, CSRF and
    session-key rotation."""
    def test_current_site_in_context_after_login(self):
        """The login context exposes the current site (or a RequestSite)."""
        response = self.client.get(reverse('login'))
        self.assertEqual(response.status_code, 200)
        if apps.is_installed('django.contrib.sites'):
            Site = apps.get_model('sites.Site')
            site = Site.objects.get_current()
            self.assertEqual(response.context['site'], site)
            self.assertEqual(response.context['site_name'], site.name)
        else:
            # Without the sites framework a RequestSite stand-in is used.
            self.assertIsInstance(response.context['site'], RequestSite)
        self.assertIsInstance(response.context['form'], AuthenticationForm)
    def test_security_check(self, password='password'):
        """External/scheme-relative 'next' URLs must not be used as redirect
        targets (open-redirect protection); same-host URLs must still pass."""
        login_url = reverse('login')
        # Those URLs should not pass the security check
        for bad_url in ('http://example.com',
                        'http:///example.com',
                        'https://example.com',
                        'ftp://exampel.com',
                        '///example.com',
                        '//example.com',
                        'javascript:alert("XSS")'):
            nasty_url = '%(url)s?%(next)s=%(bad_url)s' % {
                'url': login_url,
                'next': REDIRECT_FIELD_NAME,
                'bad_url': urlquote(bad_url),
            }
            response = self.client.post(nasty_url, {
                'username': 'testclient',
                'password': password,
            })
            self.assertEqual(response.status_code, 302)
            self.assertNotIn(bad_url, response.url,
                             "%s should be blocked" % bad_url)
        # These URLs *should* still pass the security check
        for good_url in ('/view/?param=http://example.com',
                         '/view/?param=https://example.com',
                         '/view?param=ftp://exampel.com',
                         'view/?param=//example.com',
                         'https://testserver/',
                         'HTTPS://testserver/',
                         '//testserver/',
                         '/url%20with%20spaces/'):  # see ticket #12534
            safe_url = '%(url)s?%(next)s=%(good_url)s' % {
                'url': login_url,
                'next': REDIRECT_FIELD_NAME,
                'good_url': urlquote(good_url),
            }
            response = self.client.post(safe_url, {
                'username': 'testclient',
                'password': password,
            })
            self.assertEqual(response.status_code, 302)
            self.assertIn(good_url, response.url, "%s should be allowed" % good_url)
    def test_login_form_contains_request(self):
        # 15198
        self.client.post('/custom_requestauth_login/', {
            'username': 'testclient',
            'password': 'password',
        }, follow=True)
        # the custom authentication form used by this login asserts
        # that a request is passed to the form successfully.
    def test_login_csrf_rotate(self, password='password'):
        """
        Makes sure that a login rotates the currently-used CSRF token.
        """
        # Do a GET to establish a CSRF token
        # TestClient isn't used here as we're testing middleware, essentially.
        req = HttpRequest()
        CsrfViewMiddleware().process_view(req, login_view, (), {})
        # get_token() triggers CSRF token inclusion in the response
        get_token(req)
        resp = login_view(req)
        resp2 = CsrfViewMiddleware().process_response(req, resp)
        csrf_cookie = resp2.cookies.get(settings.CSRF_COOKIE_NAME, None)
        token1 = csrf_cookie.coded_value
        # Prepare the POST request
        req = HttpRequest()
        req.COOKIES[settings.CSRF_COOKIE_NAME] = token1
        req.method = "POST"
        req.POST = {'username': 'testclient', 'password': password, 'csrfmiddlewaretoken': token1}
        # Use POST request to log in
        SessionMiddleware().process_request(req)
        CsrfViewMiddleware().process_view(req, login_view, (), {})
        req.META["SERVER_NAME"] = "testserver"  # Required to have redirect work in login view
        req.META["SERVER_PORT"] = 80
        resp = login_view(req)
        resp2 = CsrfViewMiddleware().process_response(req, resp)
        csrf_cookie = resp2.cookies.get(settings.CSRF_COOKIE_NAME, None)
        token2 = csrf_cookie.coded_value
        # Check the CSRF token switched
        self.assertNotEqual(token1, token2)
    def test_session_key_flushed_on_login(self):
        """
        To avoid reusing another user's session, ensure a new, empty session is
        created if the existing session corresponds to a different authenticated
        user.
        """
        self.login()
        original_session_key = self.client.session.session_key
        self.login(username='staff')
        self.assertNotEqual(original_session_key, self.client.session.session_key)
    def test_session_key_flushed_on_login_after_password_change(self):
        """
        As above, but same user logging in after a password change.
        """
        self.login()
        original_session_key = self.client.session.session_key
        # If no password change, session key should not be flushed.
        self.login()
        self.assertEqual(original_session_key, self.client.session.session_key)
        user = User.objects.get(username='testclient')
        user.set_password('foobar')
        user.save()
        self.login(password='foobar')
        self.assertNotEqual(original_session_key, self.client.session.session_key)
    def test_login_session_without_hash_session_key(self):
        """
        Session without django.contrib.auth.HASH_SESSION_KEY should login
        without an exception.
        """
        user = User.objects.get(username='testclient')
        # Build a session by hand containing only SESSION_KEY (no auth hash).
        engine = import_module(settings.SESSION_ENGINE)
        session = engine.SessionStore()
        session[SESSION_KEY] = user.id
        session.save()
        original_session_key = session.session_key
        self.client.cookies[settings.SESSION_COOKIE_NAME] = original_session_key
        self.login()
        self.assertNotEqual(original_session_key, self.client.session.session_key)
class LoginURLSettings(AuthViewsTestCase):
    """Tests for settings.LOGIN_URL."""
    def assertLoginURLEquals(self, url, parse_qs=False):
        """Hit a login_required view and assert the redirect target is `url`."""
        response = self.client.get('/login_required/')
        self.assertEqual(response.status_code, 302)
        self.assertURLEqual(response.url, url, parse_qs=parse_qs)
    @override_settings(LOGIN_URL='/login/')
    def test_standard_login_url(self):
        self.assertLoginURLEquals('/login/?next=/login_required/')
    @override_settings(LOGIN_URL='login')
    def test_named_login_url(self):
        # A URL pattern name is resolved before redirecting.
        self.assertLoginURLEquals('/login/?next=/login_required/')
    @override_settings(LOGIN_URL='http://remote.example.com/login')
    def test_remote_login_url(self):
        # With a remote LOGIN_URL the 'next' value must be a full URL.
        quoted_next = urlquote('http://testserver/login_required/')
        expected = 'http://remote.example.com/login?next=%s' % quoted_next
        self.assertLoginURLEquals(expected)
    @override_settings(LOGIN_URL='https:///login/')
    def test_https_login_url(self):
        quoted_next = urlquote('http://testserver/login_required/')
        expected = 'https:///login/?next=%s' % quoted_next
        self.assertLoginURLEquals(expected)
    @override_settings(LOGIN_URL='/login/?pretty=1')
    def test_login_url_with_querystring(self):
        # parse_qs=True: the existing and appended params may come in any order.
        self.assertLoginURLEquals('/login/?pretty=1&next=/login_required/', parse_qs=True)
    @override_settings(LOGIN_URL='http://remote.example.com/login/?next=/default/')
    def test_remote_login_url_with_next_querystring(self):
        # An existing 'next' in LOGIN_URL is replaced with the real target.
        quoted_next = urlquote('http://testserver/login_required/')
        expected = 'http://remote.example.com/login/?next=%s' % quoted_next
        self.assertLoginURLEquals(expected)
    @override_settings(LOGIN_URL=reverse_lazy('login'))
    def test_lazy_login_url(self):
        self.assertLoginURLEquals('/login/?next=/login_required/')
class LoginRedirectUrlTest(AuthViewsTestCase):
    """Tests for settings.LOGIN_REDIRECT_URL."""
    def assertLoginRedirectURLEqual(self, expected_url):
        """Log in and assert the resulting redirect points at *expected_url*."""
        login_response = self.login()
        self.assertEqual(login_response.status_code, 302)
        self.assertURLEqual(login_response.url, expected_url)
    def test_default(self):
        """With no override, login redirects to the stock profile URL."""
        self.assertLoginRedirectURLEqual('/accounts/profile/')
    @override_settings(LOGIN_REDIRECT_URL='/custom/')
    def test_custom(self):
        """A custom path is used verbatim."""
        self.assertLoginRedirectURLEqual('/custom/')
    @override_settings(LOGIN_REDIRECT_URL='password_reset')
    def test_named(self):
        """A URL pattern name is reversed before redirecting."""
        self.assertLoginRedirectURLEqual('/password_reset/')
    @override_settings(LOGIN_REDIRECT_URL='http://remote.example.com/welcome/')
    def test_remote(self):
        """A fully-qualified remote URL is used verbatim."""
        self.assertLoginRedirectURLEqual('http://remote.example.com/welcome/')
class RedirectToLoginTests(AuthViewsTestCase):
    """Tests for the redirect_to_login view"""
    @override_settings(LOGIN_URL=reverse_lazy('login'))
    def test_redirect_to_login_with_lazy(self):
        """A lazily-reversed LOGIN_URL is resolved before the redirect is built."""
        response = redirect_to_login(next='/else/where/')
        self.assertEqual('/login/?next=/else/where/', response.url)
    @override_settings(LOGIN_URL=reverse_lazy('login'))
    def test_redirect_to_login_with_lazy_and_unicode(self):
        """Non-ASCII characters in the next URL are percent-encoded."""
        response = redirect_to_login(next='/else/where/झ/')
        self.assertEqual('/login/?next=/else/where/%E0%A4%9D/', response.url)
class LogoutTest(AuthViewsTestCase):
    """Tests for the logout view: templates, redirects, open-redirect
    protection and session-language preservation."""
    def confirm_logged_out(self):
        """Assert the test client's session no longer holds an auth key."""
        self.assertNotIn(SESSION_KEY, self.client.session)
    def test_logout_default(self):
        "Logout without next_page option renders the default template"
        self.login()
        response = self.client.get('/logout/')
        self.assertContains(response, 'Logged out')
        self.confirm_logged_out()
    def test_14377(self):
        # Bug 14377
        self.login()
        response = self.client.get('/logout/')
        self.assertIn('site', response.context)
    def test_logout_with_overridden_redirect_url(self):
        # Bug 11223
        self.login()
        response = self.client.get('/logout/next_page/')
        self.assertEqual(response.status_code, 302)
        self.assertURLEqual(response.url, '/somewhere/')
        # A ?next= querystring takes precedence over the view's next_page.
        response = self.client.get('/logout/next_page/?next=/login/')
        self.assertEqual(response.status_code, 302)
        self.assertURLEqual(response.url, '/login/')
        self.confirm_logged_out()
    def test_logout_with_next_page_specified(self):
        "Logout with next_page option given redirects to specified resource"
        self.login()
        response = self.client.get('/logout/next_page/')
        self.assertEqual(response.status_code, 302)
        self.assertURLEqual(response.url, '/somewhere/')
        self.confirm_logged_out()
    def test_logout_with_redirect_argument(self):
        "Logout with query string redirects to specified resource"
        self.login()
        response = self.client.get('/logout/?next=/login/')
        self.assertEqual(response.status_code, 302)
        self.assertURLEqual(response.url, '/login/')
        self.confirm_logged_out()
    def test_logout_with_custom_redirect_argument(self):
        "Logout with custom query string redirects to specified resource"
        self.login()
        response = self.client.get('/logout/custom_query/?follow=/somewhere/')
        self.assertEqual(response.status_code, 302)
        self.assertURLEqual(response.url, '/somewhere/')
        self.confirm_logged_out()
    def test_logout_with_named_redirect(self):
        "Logout resolves names or URLs passed as next_page."
        self.login()
        response = self.client.get('/logout/next_page/named/')
        self.assertEqual(response.status_code, 302)
        self.assertURLEqual(response.url, '/password_reset/')
        self.confirm_logged_out()
    def test_security_check(self, password='password'):
        """External/scheme-relative 'next' URLs must not be used as redirect
        targets after logout; same-host URLs must still pass."""
        logout_url = reverse('logout')
        # Those URLs should not pass the security check
        for bad_url in ('http://example.com',
                        'http:///example.com',
                        'https://example.com',
                        'ftp://exampel.com',
                        '///example.com',
                        '//example.com',
                        'javascript:alert("XSS")'):
            nasty_url = '%(url)s?%(next)s=%(bad_url)s' % {
                'url': logout_url,
                'next': REDIRECT_FIELD_NAME,
                'bad_url': urlquote(bad_url),
            }
            self.login()
            response = self.client.get(nasty_url)
            self.assertEqual(response.status_code, 302)
            self.assertNotIn(bad_url, response.url,
                             "%s should be blocked" % bad_url)
            self.confirm_logged_out()
        # These URLs *should* still pass the security check
        for good_url in ('/view/?param=http://example.com',
                         '/view/?param=https://example.com',
                         '/view?param=ftp://exampel.com',
                         'view/?param=//example.com',
                         'https://testserver/',
                         'HTTPS://testserver/',
                         '//testserver/',
                         '/url%20with%20spaces/'):  # see ticket #12534
            safe_url = '%(url)s?%(next)s=%(good_url)s' % {
                'url': logout_url,
                'next': REDIRECT_FIELD_NAME,
                'good_url': urlquote(good_url),
            }
            self.login()
            response = self.client.get(safe_url)
            self.assertEqual(response.status_code, 302)
            self.assertIn(good_url, response.url, "%s should be allowed" % good_url)
            self.confirm_logged_out()
    def test_logout_preserve_language(self):
        """Check that language stored in session is preserved after logout"""
        # Create a new session with language
        engine = import_module(settings.SESSION_ENGINE)
        session = engine.SessionStore()
        session[LANGUAGE_SESSION_KEY] = 'pl'
        session.save()
        self.client.cookies[settings.SESSION_COOKIE_NAME] = session.session_key
        self.client.get('/logout/')
        self.assertEqual(self.client.session[LANGUAGE_SESSION_KEY], 'pl')
# Redirect in test_user_change_password will fail if session auth hash
# isn't updated after password change (#21649)
@modify_settings(MIDDLEWARE_CLASSES={
'append': 'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
})
@override_settings(
PASSWORD_HASHERS=['django.contrib.auth.hashers.SHA1PasswordHasher'],
ROOT_URLCONF='auth_tests.urls_admin',
)
class ChangelistTests(AuthViewsTestCase):
    def setUp(self):
        """Promote the fixture user to superuser, log in, and cache it as self.admin."""
        # Make me a superuser before logging in.
        User.objects.filter(username='testclient').update(is_staff=True, is_superuser=True)
        self.login()
        self.admin = User.objects.get(pk=self.u1.pk)
def get_user_data(self, user):
return {
'username': user.username,
'password': user.password,
'email': user.email,
'is_active': user.is_active,
'is_staff': user.is_staff,
'is_superuser': user.is_superuser,
'last_login_0': user.last_login.strftime('%Y-%m-%d'),
'last_login_1': user.last_login.strftime('%H:%M:%S'),
'initial-last_login_0': user.last_login.strftime('%Y-%m-%d'),
'initial-last_login_1': user.last_login.strftime('%H:%M:%S'),
'date_joined_0': user.date_joined.strftime('%Y-%m-%d'),
'date_joined_1': user.date_joined.strftime('%H:%M:%S'),
'initial-date_joined_0': user.date_joined.strftime('%Y-%m-%d'),
'initial-date_joined_1': user.date_joined.strftime('%H:%M:%S'),
'first_name': user.first_name,
'last_name': user.last_name,
}
    # #20078 - users shouldn't be allowed to guess password hashes via
    # repeated password__startswith queries.
    def test_changelist_disallows_password_lookups(self):
        """Filtering the user changelist on password must return 400 and log."""
        # A lookup that tries to filter on password isn't OK
        with patch_logger('django.security.DisallowedModelAdminLookup', 'error') as logger_calls:
            response = self.client.get(reverse('auth_test_admin:auth_user_changelist') + '?password__startswith=sha1$')
        self.assertEqual(response.status_code, 400)
        self.assertEqual(len(logger_calls), 1)
def test_user_change_email(self):
data = self.get_user_data(self.admin)
data['email'] = 'new_' + data['email']
response = self.client.post(
reverse('auth_test_admin:auth_user_change', args=(self.admin.pk,)),
data
)
self.assertRedirects(response, reverse('auth_test_admin:auth_user_changelist'))
row = LogEntry.objects.latest('id')
self.assertEqual(row.change_message, 'Changed email.')
def test_user_not_change(self):
response = self.client.post(
reverse('auth_test_admin:auth_user_change', args=(self.admin.pk,)),
self.get_user_data(self.admin)
)
self.assertRedirects(response, reverse('auth_test_admin:auth_user_changelist'))
row = LogEntry.objects.latest('id')
self.assertEqual(row.change_message, 'No fields changed.')
def test_user_change_password(self):
user_change_url = reverse('auth_test_admin:auth_user_change', args=(self.admin.pk,))
password_change_url = reverse('auth_test_admin:auth_user_password_change', args=(self.admin.pk,))
response = self.client.get(user_change_url)
# Test the link inside password field help_text.
rel_link = re.search(
r'you can change the password using <a href="([^"]*)">this form</a>',
force_text(response.content)
).groups()[0]
self.assertEqual(
os.path.normpath(user_change_url + rel_link),
os.path.normpath(password_change_url)
)
response = self.client.post(
password_change_url,
{
'password1': 'password1',
'password2': 'password1',
}
)
self.assertRedirects(response, user_change_url)
row = LogEntry.objects.latest('id')
self.assertEqual(row.change_message, 'Changed password.')
self.logout()
self.login(password='password1')
def test_user_change_different_user_password(self):
u = User.objects.get(email='staffmember@example.com')
response = self.client.post(
reverse('auth_test_admin:auth_user_password_change', args=(u.pk,)),
{
'password1': 'password1',
'password2': 'password1',
}
)
self.assertRedirects(response, reverse('auth_test_admin:auth_user_change', args=(u.pk,)))
row = LogEntry.objects.latest('id')
self.assertEqual(row.user_id, self.admin.pk)
self.assertEqual(row.object_id, str(u.pk))
self.assertEqual(row.change_message, 'Changed password.')
def test_password_change_bad_url(self):
response = self.client.get(reverse('auth_test_admin:auth_user_password_change', args=('foobar',)))
self.assertEqual(response.status_code, 404)
@override_settings(
    AUTH_USER_MODEL='auth.UUIDUser',
    ROOT_URLCONF='auth_tests.urls_custom_user_admin',
)
class UUIDUserTests(TestCase):
    """Admin password change for a custom user model with a UUID primary key."""
    def test_admin_password_change(self):
        u = UUIDUser.objects.create_superuser(username='uuid', email='foo@bar.com', password='test')
        self.assertTrue(self.client.login(username='uuid', password='test'))
        user_change_url = reverse('custom_user_admin:auth_uuiduser_change', args=(u.pk,))
        response = self.client.get(user_change_url)
        self.assertEqual(response.status_code, 200)
        password_change_url = reverse('custom_user_admin:auth_user_password_change', args=(u.pk,))
        response = self.client.get(password_change_url)
        self.assertEqual(response.status_code, 200)
        # A LogEntry is created with pk=1 which breaks a FK constraint on MySQL
        with connection.constraint_checks_disabled():
            response = self.client.post(password_change_url, {
                'password1': 'password1',
                'password2': 'password1',
            })
        self.assertRedirects(response, user_change_url)
        row = LogEntry.objects.latest('id')
        self.assertEqual(row.user_id, 1)  # hardcoded in CustomUserAdmin.log_change()
        self.assertEqual(row.object_id, str(u.pk))
        self.assertEqual(row.change_message, 'Changed password.')
##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/llnl/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class PyCython(PythonPackage):
    """The Cython compiler for writing C extensions for the Python language."""
    homepage = "https://pypi.python.org/pypi/cython"
    url = "https://pypi.io/packages/source/c/cython/Cython-0.25.2.tar.gz"
    # Known releases, newest first.  The second argument is the MD5 checksum
    # Spack uses to verify the downloaded tarball; do not edit these strings.
    version('0.25.2', '642c81285e1bb833b14ab3f439964086')
    version('0.23.5', '66b62989a67c55af016c916da36e7514')
    version('0.23.4', '157df1f69bcec6b56fd97e0f2e057f6e')
    # These versions contain illegal Python3 code...
    version('0.22', '1ae25add4ef7b63ee9b4af697300d6b6')
    version('0.21.2', 'd21adb870c75680dc857cd05d41046a4')
"""retrive local user settings"""
from ConfigParser import SafeConfigParser, NoOptionError
import os as os
from os.path import join, split, abspath, isfile, expanduser
from sys import modules
self = modules[__name__]
# define various filepaths
omelib_directory = join(split(abspath(__file__))[0], "")
ome_directory = join(abspath(join(omelib_directory, "..")), "")
def which(program):
    """Locate *program* and return its path, or None when no match is found.

    A bare name is searched along ``os.environ["PATH"]`` plus the omelib and
    ome directories; a name that already carries a directory component is
    only checked directly.  On Windows the lookup is retried once with an
    ``.exe`` suffix appended.
    """
    def _is_executable(candidate):
        # A hit must both exist as a regular file and carry the execute bit.
        return isfile(candidate) and os.access(candidate, os.X_OK)
    directory = split(program)[0]
    if directory:
        if _is_executable(program):
            return program
    else:
        search_dirs = os.environ["PATH"].split(os.pathsep)
        search_dirs.extend((omelib_directory, ome_directory))
        for candidate_dir in search_dirs:
            candidate = join(candidate_dir, program)
            if _is_executable(candidate):
                return candidate
    # Windows executables usually need the .exe suffix; retry once with it.
    if os.name == "nt" and not program.endswith(".exe"):
        return which(program + ".exe")
    return None
def _escape_space(program):
"""escape spaces in for windows"""
if os.name == "nt" and ' ' in program:
return '"' + program + '"'
else:
return program
# Shared parser instance; defaults below are overridden by settings.ini.
config = SafeConfigParser()
# set the default settings
config.add_section("DATABASE")
config.set("DATABASE", "postgres_host", "localhost")
config.set("DATABASE", "postgres_port", "5432")
config.set("DATABASE", "postgres_database", "ome_stage_2")
config.set("DATABASE", "postgres_user", "dbuser")
config.set("DATABASE", "postgres_password", "")
config.set("DATABASE", "postgres_test_database", "ome_test")
# DATA and EXECUTABLES have no defaults; they are filled in (or detected)
# by load_settings_from_file.
config.add_section("DATA")
config.add_section("EXECUTABLES")
# overwrite defaults settings with settings from the file
def load_settings_from_file(filepath="settings.ini", in_omelib=True):
    """Reload settings from a different settings file.

    Reads the INI file into ``config``, auto-detects missing executables via
    :func:`which`, and publishes every resolved value as an attribute of this
    module (via ``self``).  Raises ``Exception`` when the required DATA
    options ``data_directory`` or ``model_genome`` are missing.

    Arguments
    ---------
    filepath: The path to the settings file to use.
    in_omelib: Whether or not the path given is a relative path from the omelib
    directory.
    """
    if in_omelib:
        filepath = join(omelib_directory, filepath)
    config.read(filepath)
    # attempt to intelligently determine more difficult settings
    if not config.has_option("DATABASE", "user"):
        if "USERNAME" in os.environ: # windows
            user = os.environ["USERNAME"]
        elif "USER" in os.environ: # unix
            user = os.environ["USER"]
        # NOTE(review): if neither USERNAME nor USER is set, `user` is
        # unbound here and this raises NameError — confirm intended.
        config.set("DATABASE", "user", user)
    # executables: only auto-detect what the settings file did not provide
    if not config.has_option("EXECUTABLES", "psql"):
        # prefer the PostgreSQL-9.1-suffixed binary when present
        psql = which("psql91")
        if psql is None:
            psql = which("psql")
        config.set("EXECUTABLES", "psql", psql)
    if not config.has_option("EXECUTABLES", "R"):
        R = which("R")
        config.set("EXECUTABLES", "R", R)
    if not config.has_option("EXECUTABLES", "Rscript"):
        Rscript = which("Rscript")
        config.set("EXECUTABLES", "Rscript", Rscript)
    if not config.has_option("EXECUTABLES", "primer3"):
        primer3 = which("primer3_core")
        config.set("EXECUTABLES", "primer3", primer3)
    if not config.has_option("EXECUTABLES", "cufflinks"):
        cufflinks = which("cufflinks")
        config.set("EXECUTABLES", "cufflinks", cufflinks)
    if not config.has_option("EXECUTABLES", "java"):
        java = which("java")
        config.set("EXECUTABLES", "java", java)
    # save options as module attributes
    self.postgres_user = config.get("DATABASE", "postgres_user")
    self.postgres_password = config.get("DATABASE", "postgres_password")
    # export the password so psql subprocesses can authenticate
    if len(self.postgres_password) > 0:
        os.environ["PGPASSWORD"] = self.postgres_password
    self.postgres_database = config.get("DATABASE", "postgres_database")
    self.postgres_host = config.get("DATABASE", "postgres_host")
    self.postgres_port = config.get("DATABASE", "postgres_port")
    self.postgres_test_database = config.get("DATABASE", "postgres_test_database")
    self.psql = _escape_space(config.get("EXECUTABLES", "psql"))
    self.R = _escape_space(config.get("EXECUTABLES", "R"))
    self.Rscript = _escape_space(config.get("EXECUTABLES", "Rscript"))
    self.primer3 = _escape_space(config.get("EXECUTABLES", "primer3"))
    self.cufflinks = config.get("EXECUTABLES", "cufflinks")
    self.java = config.get("EXECUTABLES", "java")
    # make a psql string with the database options included
    self.psql_full = "%s --host=%s --username=%s --port=%s " % \
        (self.psql, self.postgres_host, self.postgres_user, self.postgres_port)
    try:
        self.data_directory = expanduser(config.get('DATA', 'data_directory'))
    except NoOptionError:
        raise Exception('data_directory was not supplied in settings.ini')
    # set default here, after getting the data directory
    try:
        self.model_genome = expanduser(config.get('DATA', 'model_genome'))
    except NoOptionError:
        raise Exception('model_genome path was not supplied in settings.ini')
    # these are optional; missing entries become attributes set to None
    for data_pref in ['compartment_names', 'reaction_id_prefs',
            'reaction_hash_prefs', 'gene_reaction_rule_prefs', 'data_source_preferences', 'model_dump_directory',
            'model_published_directory', 'model_polished_directory']:
        try:
            setattr(self, data_pref, expanduser(config.get('DATA', data_pref)))
        except NoOptionError:
            setattr(self, data_pref, None)
# Load the default settings file at import time so the module is ready to use.
load_settings_from_file()
# Remove helper names that should not be part of the public module namespace.
del SafeConfigParser, modules
#encoding: utf8
import numpy as np
from scipy.misc import comb
from scipy.stats import norm
import matplotlib.pyplot as plt
import pdb
'''
容量和alpha, beta都有关, 和delta有关。一般delta取一个sigma
err1 = alpha = 0.1
err2 = beta = 0.2
正态样本容量,用来控制第二类错误(这么说还好不对)
delta 默认为一个标准差
Phi((c - mu0)*sqrt(n)/sigma) <= (1-alpha)
c <= ppf(1-alpha) * sigma/sqrt(n) + mu0
ds = delta / sigma #delta,sigma指定后,这个就确定了。引入这个变量主要是方例后继的计算
mu = mu0 + delta
Phi((c - mu)*sqrt(n)/sigma) < beta
Phi((c - mu)*sqrt(n)/sigma) < beta
Phi((c - mu0 - delta)*sqrt(n)/sigma) < beta
(c - mu0 - delta)*sqrt(n)/sigma) < Phi(beta)
(c - mu0 - delta)*sqrt(n)/sigma) < -Phi(1-beta)
#c取最大值,有
(Za * sigma/sqrt(n) + mu0 - mu0 - delta)*sqrt(n)/sigma) < -Zbeta
Za - delta*sqrt(n)/sigma < -Zbeta
sqrt(n) > (Za + Zbeta) * sigma/delta
'''
def norm_sample_contain(sigma, delta=None, max_alpha=0.1, max_beta=0.2):
    """Return the sample size for a one-sided normal mean test.

    The returned n is the smallest integer such that a test with type-I
    error at most ``max_alpha`` has type-II error at most ``max_beta`` at
    the alternative mean mu0 + delta, per the derivation above:
    sqrt(n) > (Z_alpha + Z_beta) * sigma / delta.

    Parameters
    ----------
    sigma : float
        Known population standard deviation.
    delta : float, optional
        Detectable shift of the mean; defaults to one standard deviation
        (the body already treated None this way, but callers previously had
        to pass it explicitly).
    max_alpha : float
        Upper bound on the type-I error (significance level).
    max_beta : float
        Upper bound on the type-II error.

    Returns
    -------
    float
        The required sample size, rounded up via ``np.ceil``.
    """
    if delta is None:  # `== None` replaced with the correct identity test
        delta = sigma
    z_alpha = norm.ppf(1 - max_alpha)
    z_beta = norm.ppf(1 - max_beta)
    min_sqrt_n = (z_alpha + z_beta) * sigma / delta
    return np.ceil(min_sqrt_n ** 2)
def p_value():
    """Compute the p-value under the normal distribution.

    Not implemented yet; this stub always yields None.
    """
    return None
if __name__ == "__main__":
colors = ['g', 'b', 'k']
#test contain of samples
mu0 = 0
sigma = 1.
betas = np.linspace(0.01, 0.3, num=50)
contains = np.zeros(len(betas))
for i in xrange(len(betas)):
beta = betas[i]
n = norm_sample_contain(sigma, delta=sigma, max_alpha=0.1, max_beta=beta)
contains[i] = n
plt.clf()
plt.plot(betas, contains, color='r')
print "betas:", betas
print "n:", contains
for i in xrange(len(betas)):
beta = betas[i]
n = norm_sample_contain(sigma, delta=sigma, max_alpha=0.05, max_beta=beta)
contains[i] = n
plt.plot(betas, contains, color='k')
print "betas:", betas
print "n:", contains
plt.savefig('images/norm_contain.png', format='png') | unknown | codeparrot/codeparrot-clean | ||
import re
from streamlink.plugin import Plugin
from streamlink.plugin.api import useragents
from streamlink.stream import HLSStream
from streamlink.utils import parse_json
class ElTreceTV(Plugin):
    """Streamlink plugin for eltrecetv.com.ar.

    Live broadcasts are relayed through YouTube (the video id is resolved via
    the iamat metadata API); VOD pages embed a Kaltura player whose entry id
    is turned into an HLS variant playlist.
    """
    # Dots in the domain are escaped so the pattern matches only the real host.
    _url_re = re.compile(r'https?://(?:www\.)?eltrecetv\.com\.ar/.+')

    @classmethod
    def can_handle_url(cls, url):
        return cls._url_re.match(url)

    def _get_streams(self):
        if "eltrecetv.com.ar/vivo" in self.url.lower():
            try:
                self.session.http.headers = {'Referer': self.url,
                                             'User-Agent': useragents.ANDROID}
                res = self.session.http.get('https://api.iamat.com/metadata/atcodes/eltrece')
                yt_id = parse_json(res.text)["atcodes"][0]["context"]["ahora"]["vivo"]["youtubeVideo"]
                yt_url = "https://www.youtube.com/watch?v={0}".format(yt_id)
                # Delegate to the YouTube plugin for the actual live streams.
                return self.session.streams(yt_url)
            except BaseException:
                self.logger.info("Live content is temporarily unavailable. Please try again later.")
        else:
            try:
                self.session.http.headers = {'Referer': self.url,
                                             'User-Agent': useragents.CHROME}
                res = self.session.http.get(self.url)
                _player_re = re.compile(r'''data-kaltura="([^"]+)"''')
                match = _player_re.search(res.text)
                if not match:
                    return
                # The attribute value is HTML-escaped JSON; restore literal
                # quotes before parsing.  (The original literal "&quot;" had
                # been entity-decoded into invalid syntax.)
                entry_id = parse_json(match.group(1).replace("&quot;", '"'))["entryId"]
                hls_url = ("https://vodgc.com/p/111/sp/11100/playManifest/entryId/{0}"
                           "/format/applehttp/protocol/https/a.m3u8".format(entry_id))
                return HLSStream.parse_variant_playlist(self.session, hls_url)
            except BaseException:
                self.logger.error("The requested VOD content is unavailable.")


__plugin__ = ElTreceTV
/*
* Copyright 2002-present the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.beans.factory.aot;
import java.lang.reflect.Constructor;
import java.lang.reflect.Executable;
import java.lang.reflect.Member;
import java.lang.reflect.Method;
import java.lang.reflect.Parameter;
import java.lang.reflect.Proxy;
import java.util.Arrays;
import java.util.function.Consumer;
import kotlin.jvm.JvmClassMappingKt;
import kotlin.reflect.KClass;
import kotlin.reflect.KFunction;
import kotlin.reflect.KParameter;
import org.jspecify.annotations.Nullable;
import org.springframework.aot.generate.AccessControl;
import org.springframework.aot.generate.AccessControl.Visibility;
import org.springframework.aot.generate.GeneratedMethod;
import org.springframework.aot.generate.GeneratedMethods;
import org.springframework.aot.generate.GenerationContext;
import org.springframework.aot.generate.MethodReference.ArgumentCodeGenerator;
import org.springframework.aot.hint.ExecutableMode;
import org.springframework.aot.hint.MemberCategory;
import org.springframework.aot.hint.ReflectionHints;
import org.springframework.aot.hint.RuntimeHints;
import org.springframework.beans.factory.config.DependencyDescriptor;
import org.springframework.beans.factory.support.AutowireCandidateResolver;
import org.springframework.beans.factory.support.DefaultListableBeanFactory;
import org.springframework.beans.factory.support.InstanceSupplier;
import org.springframework.beans.factory.support.RegisteredBean;
import org.springframework.beans.factory.support.RegisteredBean.InstantiationDescriptor;
import org.springframework.core.KotlinDetector;
import org.springframework.core.MethodParameter;
import org.springframework.core.ResolvableType;
import org.springframework.javapoet.ClassName;
import org.springframework.javapoet.CodeBlock;
import org.springframework.javapoet.CodeBlock.Builder;
import org.springframework.javapoet.MethodSpec;
import org.springframework.javapoet.ParameterizedTypeName;
import org.springframework.util.ClassUtils;
import org.springframework.util.function.ThrowingSupplier;
/**
* Default code generator to create an {@link InstanceSupplier}, usually in
* the form of a {@link BeanInstanceSupplier} that retains the executable
* that is used to instantiate the bean. Takes care of registering the
* necessary hints if reflection or a JDK proxy is required.
*
* <p>Generated code is usually a method reference that generates the
* {@link BeanInstanceSupplier}, but some shortcut can be used as well such as:
* <pre class="code">
* InstanceSupplier.of(TheGeneratedClass::getMyBeanInstance);
* </pre>
*
* @author Phillip Webb
* @author Stephane Nicoll
* @author Juergen Hoeller
* @author Sebastien Deleuze
* @since 6.0
* @see BeanRegistrationCodeFragments
*/
public class InstanceSupplierCodeGenerator {
private static final String REGISTERED_BEAN_PARAMETER_NAME = "registeredBean";
private static final String ARGS_PARAMETER_NAME = "args";
private static final javax.lang.model.element.Modifier[] PRIVATE_STATIC =
{javax.lang.model.element.Modifier.PRIVATE, javax.lang.model.element.Modifier.STATIC};
private static final CodeBlock NO_ARGS = CodeBlock.of("");
private static final boolean KOTLIN_REFLECT_PRESENT = KotlinDetector.isKotlinReflectPresent();
private final GenerationContext generationContext;
private final ClassName className;
private final GeneratedMethods generatedMethods;
private final boolean allowDirectSupplierShortcut;
/**
* Create a new generator instance.
* @param generationContext the generation context
* @param className the class name of the bean to instantiate
* @param generatedMethods the generated methods
* @param allowDirectSupplierShortcut whether a direct supplier may be used rather
* than always needing an {@link InstanceSupplier}
*/
public InstanceSupplierCodeGenerator(GenerationContext generationContext,
ClassName className, GeneratedMethods generatedMethods, boolean allowDirectSupplierShortcut) {
this.generationContext = generationContext;
this.className = className;
this.generatedMethods = generatedMethods;
this.allowDirectSupplierShortcut = allowDirectSupplierShortcut;
}
/**
* Generate the instance supplier code.
* @param registeredBean the bean to handle
* @param constructorOrFactoryMethod the executable to use to create the bean
* @return the generated code
* @deprecated in favor of {@link #generateCode(RegisteredBean, InstantiationDescriptor)}
*/
@Deprecated(since = "6.1.7")
public CodeBlock generateCode(RegisteredBean registeredBean, Executable constructorOrFactoryMethod) {
return generateCode(registeredBean, new InstantiationDescriptor(
constructorOrFactoryMethod, constructorOrFactoryMethod.getDeclaringClass()));
}
/**
* Generate the instance supplier code.
* @param registeredBean the bean to handle
* @param instantiationDescriptor the executable to use to create the bean
* @return the generated code
* @since 6.1.7
*/
public CodeBlock generateCode(RegisteredBean registeredBean, InstantiationDescriptor instantiationDescriptor) {
Executable constructorOrFactoryMethod = instantiationDescriptor.executable();
registerRuntimeHintsIfNecessary(registeredBean, constructorOrFactoryMethod);
if (constructorOrFactoryMethod instanceof Constructor<?> constructor) {
return generateCodeForConstructor(registeredBean, constructor);
}
if (constructorOrFactoryMethod instanceof Method method && !KotlinDetector.isSuspendingFunction(method)) {
return generateCodeForFactoryMethod(registeredBean, method, instantiationDescriptor.targetClass());
}
throw new AotBeanProcessingException(registeredBean, "no suitable constructor or factory method found");
}
private void registerRuntimeHintsIfNecessary(RegisteredBean registeredBean, Executable constructorOrFactoryMethod) {
if (registeredBean.getBeanFactory() instanceof DefaultListableBeanFactory dlbf) {
RuntimeHints runtimeHints = this.generationContext.getRuntimeHints();
ProxyRuntimeHintsRegistrar registrar = new ProxyRuntimeHintsRegistrar(dlbf.getAutowireCandidateResolver());
registrar.registerRuntimeHints(runtimeHints, constructorOrFactoryMethod);
}
}
private CodeBlock generateCodeForConstructor(RegisteredBean registeredBean, Constructor<?> constructor) {
ConstructorDescriptor descriptor = new ConstructorDescriptor(
registeredBean.getBeanName(), constructor, registeredBean.getBeanClass());
Class<?> publicType = descriptor.publicType();
if (KOTLIN_REFLECT_PRESENT && KotlinDetector.isKotlinType(publicType) && KotlinDelegate.hasConstructorWithOptionalParameter(publicType)) {
return generateCodeForInaccessibleConstructor(descriptor,
hints -> hints.registerType(publicType, MemberCategory.INVOKE_DECLARED_CONSTRUCTORS));
}
if (!isVisible(constructor, constructor.getDeclaringClass()) ||
registeredBean.getMergedBeanDefinition().hasMethodOverrides()) {
return generateCodeForInaccessibleConstructor(descriptor,
hints -> hints.registerConstructor(constructor, ExecutableMode.INVOKE));
}
return generateCodeForAccessibleConstructor(descriptor);
}
private CodeBlock generateCodeForAccessibleConstructor(ConstructorDescriptor descriptor) {
Constructor<?> constructor = descriptor.constructor();
this.generationContext.getRuntimeHints().reflection().registerType(constructor.getDeclaringClass());
if (constructor.getParameterCount() == 0) {
if (!this.allowDirectSupplierShortcut) {
return CodeBlock.of("$T.using($T::new)", InstanceSupplier.class, descriptor.actualType());
}
if (!isThrowingCheckedException(constructor)) {
return CodeBlock.of("$T::new", descriptor.actualType());
}
return CodeBlock.of("$T.of($T::new)", ThrowingSupplier.class, descriptor.actualType());
}
GeneratedMethod generatedMethod = generateGetInstanceSupplierMethod(method ->
buildGetInstanceMethodForConstructor(method, descriptor, PRIVATE_STATIC));
return generateReturnStatement(generatedMethod);
}
private CodeBlock generateCodeForInaccessibleConstructor(ConstructorDescriptor descriptor,
Consumer<ReflectionHints> hints) {
Constructor<?> constructor = descriptor.constructor();
CodeWarnings codeWarnings = new CodeWarnings();
codeWarnings.detectDeprecation(constructor.getDeclaringClass(), constructor)
.detectDeprecation(Arrays.stream(constructor.getParameters()).map(Parameter::getType));
hints.accept(this.generationContext.getRuntimeHints().reflection());
GeneratedMethod generatedMethod = generateGetInstanceSupplierMethod(method -> {
method.addJavadoc("Get the bean instance supplier for '$L'.", descriptor.beanName());
method.addModifiers(PRIVATE_STATIC);
codeWarnings.suppress(method);
method.returns(ParameterizedTypeName.get(BeanInstanceSupplier.class, descriptor.publicType()));
method.addStatement(generateResolverForConstructor(descriptor));
});
return generateReturnStatement(generatedMethod);
}
private void buildGetInstanceMethodForConstructor(MethodSpec.Builder method, ConstructorDescriptor descriptor,
javax.lang.model.element.Modifier... modifiers) {
Constructor<?> constructor = descriptor.constructor();
Class<?> publicType = descriptor.publicType();
Class<?> actualType = descriptor.actualType();
CodeWarnings codeWarnings = new CodeWarnings();
codeWarnings.detectDeprecation(actualType, constructor)
.detectDeprecation(Arrays.stream(constructor.getParameters()).map(Parameter::getType));
method.addJavadoc("Get the bean instance supplier for '$L'.", descriptor.beanName());
method.addModifiers(modifiers);
codeWarnings.suppress(method);
method.returns(ParameterizedTypeName.get(BeanInstanceSupplier.class, publicType));
CodeBlock.Builder code = CodeBlock.builder();
code.add(generateResolverForConstructor(descriptor));
boolean hasArguments = constructor.getParameterCount() > 0;
boolean onInnerClass = ClassUtils.isInnerClass(actualType);
CodeBlock arguments = hasArguments ?
new AutowiredArgumentsCodeGenerator(actualType, constructor)
.generateCode(constructor.getParameterTypes(), (onInnerClass ? 1 : 0)) : NO_ARGS;
CodeBlock newInstance = generateNewInstanceCodeForConstructor(actualType, arguments);
code.add(generateWithGeneratorCode(hasArguments, newInstance));
method.addStatement(code.build());
}
private CodeBlock generateResolverForConstructor(ConstructorDescriptor descriptor) {
CodeBlock parameterTypes = generateParameterTypesCode(descriptor.constructor().getParameterTypes());
return CodeBlock.of("return $T.<$T>forConstructor($L)", BeanInstanceSupplier.class,
descriptor.publicType(), parameterTypes);
}
private CodeBlock generateNewInstanceCodeForConstructor(Class<?> declaringClass, CodeBlock args) {
if (ClassUtils.isInnerClass(declaringClass)) {
return CodeBlock.of("$L.getBeanFactory().getBean($T.class).new $L($L)",
REGISTERED_BEAN_PARAMETER_NAME, declaringClass.getEnclosingClass(),
declaringClass.getSimpleName(), args);
}
return CodeBlock.of("new $T($L)", declaringClass, args);
}
private CodeBlock generateCodeForFactoryMethod(
RegisteredBean registeredBean, Method factoryMethod, Class<?> targetClass) {
if (!isVisible(factoryMethod, targetClass)) {
return generateCodeForInaccessibleFactoryMethod(registeredBean.getBeanName(), factoryMethod, targetClass);
}
return generateCodeForAccessibleFactoryMethod(registeredBean.getBeanName(), factoryMethod, targetClass,
registeredBean.getMergedBeanDefinition().getFactoryBeanName());
}
private CodeBlock generateCodeForAccessibleFactoryMethod(String beanName,
Method factoryMethod, Class<?> targetClass, @Nullable String factoryBeanName) {
this.generationContext.getRuntimeHints().reflection().registerType(factoryMethod.getDeclaringClass());
if (factoryBeanName == null && factoryMethod.getParameterCount() == 0) {
Class<?> suppliedType = ClassUtils.resolvePrimitiveIfNecessary(factoryMethod.getReturnType());
CodeBlock.Builder code = CodeBlock.builder();
code.add("$T.<$T>forFactoryMethod($T.class, $S)", BeanInstanceSupplier.class,
suppliedType, targetClass, factoryMethod.getName());
code.add(".withGenerator(($L) -> $T.$L())", REGISTERED_BEAN_PARAMETER_NAME,
ClassUtils.getUserClass(targetClass), factoryMethod.getName());
return code.build();
}
GeneratedMethod getInstanceMethod = generateGetInstanceSupplierMethod(method ->
buildGetInstanceMethodForFactoryMethod(method, beanName, factoryMethod,
targetClass, factoryBeanName, PRIVATE_STATIC));
return generateReturnStatement(getInstanceMethod);
}
private CodeBlock generateCodeForInaccessibleFactoryMethod(
String beanName, Method factoryMethod, Class<?> targetClass) {
this.generationContext.getRuntimeHints().reflection().registerMethod(factoryMethod, ExecutableMode.INVOKE);
GeneratedMethod getInstanceMethod = generateGetInstanceSupplierMethod(method -> {
CodeWarnings codeWarnings = new CodeWarnings();
Class<?> suppliedType = ClassUtils.resolvePrimitiveIfNecessary(factoryMethod.getReturnType());
codeWarnings.detectDeprecation(suppliedType, factoryMethod);
method.addJavadoc("Get the bean instance supplier for '$L'.", beanName);
method.addModifiers(PRIVATE_STATIC);
codeWarnings.suppress(method);
method.returns(ParameterizedTypeName.get(BeanInstanceSupplier.class, suppliedType));
method.addStatement(generateInstanceSupplierForFactoryMethod(
factoryMethod, suppliedType, targetClass, factoryMethod.getName()));
});
return generateReturnStatement(getInstanceMethod);
}
private void buildGetInstanceMethodForFactoryMethod(MethodSpec.Builder method,
String beanName, Method factoryMethod, Class<?> targetClass,
@Nullable String factoryBeanName, javax.lang.model.element.Modifier... modifiers) {
String factoryMethodName = factoryMethod.getName();
Class<?> suppliedType = ClassUtils.resolvePrimitiveIfNecessary(factoryMethod.getReturnType());
CodeWarnings codeWarnings = new CodeWarnings();
codeWarnings.detectDeprecation(ClassUtils.getUserClass(targetClass), factoryMethod, suppliedType)
.detectDeprecation(Arrays.stream(factoryMethod.getParameters()).map(Parameter::getType));
method.addJavadoc("Get the bean instance supplier for '$L'.", beanName);
method.addModifiers(modifiers);
codeWarnings.suppress(method);
method.returns(ParameterizedTypeName.get(BeanInstanceSupplier.class, suppliedType));
CodeBlock.Builder code = CodeBlock.builder();
code.add(generateInstanceSupplierForFactoryMethod(
factoryMethod, suppliedType, targetClass, factoryMethodName));
boolean hasArguments = factoryMethod.getParameterCount() > 0;
CodeBlock arguments = hasArguments ?
new AutowiredArgumentsCodeGenerator(ClassUtils.getUserClass(targetClass), factoryMethod)
.generateCode(factoryMethod.getParameterTypes()) : NO_ARGS;
CodeBlock newInstance = generateNewInstanceCodeForMethod(
factoryBeanName, ClassUtils.getUserClass(targetClass), factoryMethodName, arguments);
code.add(generateWithGeneratorCode(hasArguments, newInstance));
method.addStatement(code.build());
}
private CodeBlock generateInstanceSupplierForFactoryMethod(Method factoryMethod,
Class<?> suppliedType, Class<?> targetClass, String factoryMethodName) {
if (factoryMethod.getParameterCount() == 0) {
return CodeBlock.of("return $T.<$T>forFactoryMethod($T.class, $S)",
BeanInstanceSupplier.class, suppliedType, targetClass, factoryMethodName);
}
CodeBlock parameterTypes = generateParameterTypesCode(factoryMethod.getParameterTypes());
return CodeBlock.of("return $T.<$T>forFactoryMethod($T.class, $S, $L)",
BeanInstanceSupplier.class, suppliedType, targetClass, factoryMethodName, parameterTypes);
}
private CodeBlock generateNewInstanceCodeForMethod(@Nullable String factoryBeanName,
Class<?> targetClass, String factoryMethodName, CodeBlock args) {
if (factoryBeanName == null) {
return CodeBlock.of("$T.$L($L)", targetClass, factoryMethodName, args);
}
return CodeBlock.of("$L.getBeanFactory().getBean(\"$L\", $T.class).$L($L)",
REGISTERED_BEAN_PARAMETER_NAME, factoryBeanName, targetClass, factoryMethodName, args);
}
private CodeBlock generateReturnStatement(GeneratedMethod generatedMethod) {
return generatedMethod.toMethodReference().toInvokeCodeBlock(
ArgumentCodeGenerator.none(), this.className);
}
private CodeBlock generateWithGeneratorCode(boolean hasArguments, CodeBlock newInstance) {
CodeBlock lambdaArguments = (hasArguments ?
CodeBlock.of("($L, $L)", REGISTERED_BEAN_PARAMETER_NAME, ARGS_PARAMETER_NAME) :
CodeBlock.of("($L)", REGISTERED_BEAN_PARAMETER_NAME));
Builder code = CodeBlock.builder();
code.add("\n");
code.indent().indent();
code.add(".withGenerator($L -> $L)", lambdaArguments, newInstance);
code.unindent().unindent();
return code.build();
}
private boolean isVisible(Member member, Class<?> targetClass) {
AccessControl classAccessControl = AccessControl.forClass(targetClass);
AccessControl memberAccessControl = AccessControl.forMember(member);
Visibility visibility = AccessControl.lowest(classAccessControl, memberAccessControl).getVisibility();
return (visibility == Visibility.PUBLIC || (visibility != Visibility.PRIVATE &&
member.getDeclaringClass().getPackageName().equals(this.className.packageName())));
}
private CodeBlock generateParameterTypesCode(Class<?>[] parameterTypes) {
CodeBlock.Builder code = CodeBlock.builder();
for (int i = 0; i < parameterTypes.length; i++) {
code.add(i > 0 ? ", " : "");
code.add("$T.class", parameterTypes[i]);
}
return code.build();
}
private GeneratedMethod generateGetInstanceSupplierMethod(Consumer<MethodSpec.Builder> method) {
return this.generatedMethods.add("getInstanceSupplier", method);
}
private boolean isThrowingCheckedException(Executable executable) {
return Arrays.stream(executable.getGenericExceptionTypes())
.map(ResolvableType::forType)
.map(ResolvableType::toClass)
.anyMatch(Exception.class::isAssignableFrom);
}
/**
 * Inner class to avoid a hard dependency on Kotlin at runtime.
 */
private static class KotlinDelegate {

	/**
	 * Return whether any constructor of the given Kotlin class declares at
	 * least one optional (defaulted) parameter.
	 */
	public static boolean hasConstructorWithOptionalParameter(Class<?> beanClass) {
		KClass<?> kClass = JvmClassMappingKt.getKotlinClass(beanClass);
		for (KFunction<?> constructor : kClass.getConstructors()) {
			for (KParameter parameter : constructor.getParameters()) {
				if (parameter.isOptional()) {
					return true;
				}
			}
		}
		return false;
	}
}
/**
 * Registers JDK proxy hints for injection points that the candidate
 * resolver would satisfy with a lazy-resolution proxy.
 */
private record ProxyRuntimeHintsRegistrar(AutowireCandidateResolver candidateResolver) {

	/**
	 * Inspect each parameter of the given executable and register proxy
	 * hints where necessary.
	 */
	public void registerRuntimeHints(RuntimeHints runtimeHints, Executable executable) {
		Class<?>[] parameterTypes = executable.getParameterTypes();
		for (int i = 0; i < parameterTypes.length; i++) {
			MethodParameter methodParam = MethodParameter.forExecutable(executable, i);
			// 'true' marks the dependency as required.
			DependencyDescriptor dependencyDescriptor = new DependencyDescriptor(methodParam, true);
			registerProxyIfNecessary(runtimeHints, dependencyDescriptor);
		}
	}

	/**
	 * Register a JDK proxy hint when the resolver produces a JDK dynamic
	 * proxy class for the given dependency; no-op otherwise.
	 */
	private void registerProxyIfNecessary(RuntimeHints runtimeHints, DependencyDescriptor dependencyDescriptor) {
		Class<?> proxyType = this.candidateResolver.getLazyResolutionProxyClass(dependencyDescriptor, null);
		if (proxyType != null && Proxy.isProxyClass(proxyType)) {
			runtimeHints.proxies().registerJdkProxy(proxyType.getInterfaces());
		}
	}
}
/**
 * Descriptor for a bean constructor: the bean name, the constructor itself,
 * and the publicly exposed bean type (which may differ from the class that
 * actually declares the constructor).
 */
record ConstructorDescriptor(String beanName, Constructor<?> constructor, Class<?> publicType) {

	/**
	 * The class that declares the constructor, i.e. the actual implementation type.
	 */
	Class<?> actualType() {
		return this.constructor.getDeclaringClass();
	}
}
} | java | github | https://github.com/spring-projects/spring-framework | spring-beans/src/main/java/org/springframework/beans/factory/aot/InstanceSupplierCodeGenerator.java |
/* Copyright (c) 2015, 2025, Oracle and/or its affiliates.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License, version 2.0,
as published by the Free Software Foundation.
This program is designed to work with certain software (including
but not limited to OpenSSL) that is licensed under separate terms,
as designated in a particular file or component or in included license
documentation. The authors of MySQL hereby grant you an additional
permission to link the program and your derivative works with the
separately licensed software that they have either included with
the program or referenced in the documentation.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License, version 2.0, for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */
#ifndef SQL_THD_INTERNAL_API_INCLUDED
#define SQL_THD_INTERNAL_API_INCLUDED
/*
This file defines THD-related API calls that are meant for internal
usage (e.g. InnoDB, Thread Pool) only. There are therefore no stability
guarantees.
*/
#include <stddef.h>
#include <sys/types.h>
#include "dur_prop.h" // durability_properties
#include "lex_string.h"
#include "mysql/components/services/bits/psi_thread_bits.h"
#include "mysql/strings/m_ctype.h"
#include "sql/handler.h" // enum_tx_isolation
class THD;
class partition_info;
THD *create_internal_thd();
void destroy_internal_thd(THD *thd);
/**
Set up various THD data for a new connection.
@note PFS instrumentation is not set by this function.
@param thd THD object
@param stack_start Start of stack for connection
*/
void thd_init(THD *thd, char *stack_start);
/**
Set up various THD data for a new connection
@param thd THD object
@param stack_start Start of stack for connection
@param bound True if bound to a physical thread.
@param psi_key Instrumentation key for the thread.
@param psi_seqnum Instrumentation sequence number for the thread.
*/
void thd_init(THD *thd, char *stack_start, bool bound, PSI_thread_key psi_key,
unsigned int psi_seqnum);
/**
Create a THD and do proper initialization of it.
@param enable_plugins Should dynamic plugin support be enabled?
@param background_thread Is this a background thread?
@param bound True if bound to a physical thread.
@param psi_key Instrumentation key for the thread.
@param psi_seqnum Instrumentation sequence number for the thread.
@note Dynamic plugin support is only possible for THDs that
are created after the server has initialized properly.
@note THDs for background threads are currently not added to
the global THD list. So they will e.g. not be visible in
SHOW PROCESSLIST and the server will not wait for them to
terminate during shutdown.
*/
THD *create_thd(bool enable_plugins, bool background_thread, bool bound,
PSI_thread_key psi_key, unsigned int psi_seqnum);
/**
Cleanup the THD object, remove it from the global list of THDs
and delete it.
@param thd Pointer to THD object.
@param clear_pfs_instr If true, then clear thread PFS instrumentations.
*/
void destroy_thd(THD *thd, bool clear_pfs_instr);
/**
Cleanup the THD object, remove it from the global list of THDs
and delete it.
@param thd Pointer to THD object.
*/
void destroy_thd(THD *thd);
/**
Set thread stack in THD object
@param thd Thread object
@param stack_start Start of stack to set in THD object
*/
void thd_set_thread_stack(THD *thd, const char *stack_start);
/**
Returns the partition_info working copy.
Used to see if a table should be created with partitioning.
@param thd thread context
@return Pointer to the working copy of partition_info or NULL.
*/
partition_info *thd_get_work_part_info(THD *thd);
enum_tx_isolation thd_get_trx_isolation(const THD *thd);
const CHARSET_INFO *thd_charset(THD *thd);
/**
Get the current query string for the thread.
@param thd The MySQL internal thread pointer
@return query string and length. May be non-null-terminated.
@note This function is not thread safe and should only be called
from the thread owning thd. @see thd_query_safe().
*/
LEX_CSTRING thd_query_unsafe(THD *thd);
/**
Get the current query string for the thread.
@param thd The MySQL internal thread pointer
@param buf Buffer where the query string will be copied
@param buflen Length of the buffer
@return Length of the query
@note This function is thread safe as the query string is
accessed under mutex protection and the string is copied
into the provided buffer. @see thd_query_unsafe().
*/
size_t thd_query_safe(THD *thd, char *buf, size_t buflen);
/**
Check if a user thread is a replication slave thread
@param thd user thread
@retval 0 the user thread is not a replication slave thread
@retval 1 the user thread is a replication slave thread
*/
int thd_slave_thread(const THD *thd);
/**
Check if a user thread is running a non-transactional update
@param thd user thread
@retval 0 the user thread is not running a non-transactional update
@retval 1 the user thread is running a non-transactional update
*/
int thd_non_transactional_update(const THD *thd);
/**
Get the user thread's binary logging format
@param thd user thread
@return Value to be used as index into the binlog_format_names array
*/
int thd_binlog_format(const THD *thd);
/**
Check if binary logging is filtered for thread's current db.
@param thd Thread handle
@retval true The query is not filtered, false otherwise.
*/
bool thd_binlog_filter_ok(const THD *thd);
/**
Check if the query may generate row changes which may end up in the binary.
@param thd Thread handle
@retval true The query may generate row changes, false otherwise.
*/
bool thd_sqlcom_can_generate_row_events(const THD *thd);
/**
Gets information on the durability property requested by a thread.
@param thd Thread handle
@return a durability property.
*/
durability_properties thd_get_durability_property(const THD *thd);
/**
Get the auto_increment_offset auto_increment_increment.
@param thd Thread object
@param off auto_increment_offset
@param inc auto_increment_increment
*/
void thd_get_autoinc(const THD *thd, ulong *off, ulong *inc);
/**
Get the tmp_table_size threshold.
@param thd Thread object
@return Value of currently set tmp_table_size threshold.
*/
size_t thd_get_tmp_table_size(const THD *thd);
/**
Is strict sql_mode set.
Needed by InnoDB.
@param thd Thread object
@return True if sql_mode has strict mode (all or trans).
@retval true sql_mode has strict mode (all or trans).
@retval false sql_mode does not have strict mode (all or trans).
*/
bool thd_is_strict_mode(const THD *thd);
/**
Is an error set in the DA.
Needed by InnoDB to catch behavior modified by an error handler.
@param thd Thread object
@return True if THD::is_error() returns true.
@retval true An error has been raised.
@retval false No error has been raised.
*/
bool thd_is_error(const THD *thd);
/**
Test a file path whether it is same as mysql data directory path.
@param path null terminated character string
@retval true The path is different from mysql data directory.
@retval false The path is same as mysql data directory.
*/
bool is_mysql_datadir_path(const char *path);
/**
Create a temporary file.
@details
The temporary file is created in a location specified by the parameter
path. If path is null, then it will be created in the location given
by the mysql server configuration (--tmpdir option). The caller
does not need to delete the file, it will be deleted automatically.
@param path location for creating temporary file
@param prefix prefix for temporary file name
@retval -1 error
@retval >=0 a file handle that can be passed to dup or my_close
*/
int mysql_tmpfile_path(const char *path, const char *prefix);
/**
Check if the server is in the process of being initialized.
Check the thread type of the THD. If this is a thread type
being used for initializing the DD or the server, return
true.
@param thd Needed since this is an opaque type in the SE.
@retval true The thread is a bootstrap thread.
@retval false The thread is not a bootstrap thread.
*/
bool thd_is_bootstrap_thread(THD *thd);
/**
Is statement updating the data dictionary tables.
@details
The thread switches to the data dictionary tables update context using
the dd::Update_dictionary_tables_ctx while updating dictionary tables.
If thread is in this context then the method returns true otherwise
false.
This method is used by the InnoDB while updating the tables to mark
transaction as DDL if this method returns true.
@param thd Thread handle.
@retval true Updates data dictionary tables.
@retval false Otherwise.
*/
bool thd_is_dd_update_stmt(const THD *thd);
my_thread_id thd_thread_id(const THD *thd);
/**
Check if SQL Layer FK handling is enabled.
@retval true use SQL Layer FK handling, false otherwise.
*/
bool thd_is_sql_fk_checks_enabled();
#endif // SQL_THD_INTERNAL_API_INCLUDED | c | github | https://github.com/mysql/mysql-server | sql/sql_thd_internal_api.h |
//! Cache for candidate selection.
use std::hash::Hash;
use rustc_data_structures::fx::FxHashMap;
use rustc_data_structures::sync::Lock;
use crate::dep_graph::DepNodeIndex;
use crate::ty::TyCtxt;
/// Cache mapping keys to values, where each entry also records the
/// dep-graph node it was computed under so that hits can replay the read.
pub struct WithDepNodeCache<Key, Value> {
    hashmap: Lock<FxHashMap<Key, WithDepNode<Value>>>,
}
// NOTE(review): manual impl rather than `#[derive(Clone)]`, presumably
// because `Lock` itself is not `Clone` — the inner map is cloned under a
// borrow into a fresh lock. Confirm against `rustc_data_structures::sync`.
impl<Key: Clone, Value: Clone> Clone for WithDepNodeCache<Key, Value> {
    fn clone(&self) -> Self {
        Self { hashmap: Lock::new(self.hashmap.borrow().clone()) }
    }
}
impl<Key, Value> Default for WithDepNodeCache<Key, Value> {
fn default() -> Self {
Self { hashmap: Default::default() }
}
}
impl<Key: Eq + Hash, Value: Clone> WithDepNodeCache<Key, Value> {
    /// Look up `key`; on a hit, registers a read of the entry's dep-node and
    /// returns a clone of the cached value.
    pub fn get<'tcx>(&self, key: &Key, tcx: TyCtxt<'tcx>) -> Option<Value> {
        self.hashmap.borrow().get(key).map(|entry| entry.get(tcx))
    }

    /// Store `value` for `key`, tagged with the dep-node it was computed under.
    pub fn insert(&self, key: Key, dep_node: DepNodeIndex, value: Value) {
        let mut map = self.hashmap.borrow_mut();
        map.insert(key, WithDepNode::new(dep_node, value));
    }
}
/// A cached value paired with the dep-graph node index under which it was
/// originally computed.
#[derive(Debug, Clone, Eq, PartialEq)]
pub struct WithDepNode<T> {
    // Dep-graph node recorded when the value was cached.
    dep_node: DepNodeIndex,
    // The cached value itself.
    cached_value: T,
}
impl<T: Clone> WithDepNode<T> {
    pub(crate) fn new(dep_node: DepNodeIndex, cached_value: T) -> Self {
        WithDepNode { dep_node, cached_value }
    }

    /// Return a clone of the cached value, first registering a read of the
    /// associated dep-node so the incremental dependency is tracked.
    pub(crate) fn get<'tcx>(&self, tcx: TyCtxt<'tcx>) -> T {
        tcx.dep_graph.read_index(self.dep_node);
        self.cached_value.clone()
    }
}
// RUN: %check_clang_tidy %s bugprone-misplaced-operator-in-strlen-in-alloc %t

// Minimal re-declarations of the allocators and length functions recognized
// by the check, so this test is self-contained and needs no system headers.
typedef __typeof(sizeof(int)) size_t;
void *malloc(size_t);
void *alloca(size_t);
void *calloc(size_t, size_t);
void *realloc(void *, size_t);
size_t strlen(const char *);
size_t strnlen(const char *, size_t);
size_t strnlen_s(const char *, size_t);
// Stand-in for the real wchar_t; only the wide-string signatures matter here.
typedef unsigned wchar_t;
size_t wcslen(const wchar_t *);
size_t wcsnlen(const wchar_t *, size_t);
size_t wcsnlen_s(const wchar_t *, size_t);
// The addition is misplaced inside the length call for each narrow-string
// length function; the check should diagnose it and hoist the addition out.
void bad_malloc(char *name) {
  char *new_name = (char *)malloc(strlen(name + 1));
  // CHECK-MESSAGES: :[[@LINE-1]]:28: warning: addition operator is applied to the argument of strlen
  // CHECK-FIXES: char *new_name = (char *)malloc(strlen(name) + 1);
  new_name = (char *)malloc(strnlen(name + 1, 10));
  // CHECK-MESSAGES: :[[@LINE-1]]:22: warning: addition operator is applied to the argument of strnlen
  // CHECK-FIXES: new_name = (char *)malloc(strnlen(name, 10) + 1);
  new_name = (char *)malloc(strnlen_s(name + 1, 10));
  // CHECK-MESSAGES: :[[@LINE-1]]:22: warning: addition operator is applied to the argument of strnlen_s
  // CHECK-FIXES: new_name = (char *)malloc(strnlen_s(name, 10) + 1);
}
// Same pattern as above, exercising the wide-character length functions.
void bad_malloc_wide(wchar_t *name) {
  wchar_t *new_name = (wchar_t *)malloc(wcslen(name + 1));
  // CHECK-MESSAGES: :[[@LINE-1]]:34: warning: addition operator is applied to the argument of wcslen
  // CHECK-FIXES: wchar_t *new_name = (wchar_t *)malloc(wcslen(name) + 1);
  new_name = (wchar_t *)malloc(wcsnlen(name + 1, 10));
  // CHECK-MESSAGES: :[[@LINE-1]]:25: warning: addition operator is applied to the argument of wcsnlen
  // CHECK-FIXES: new_name = (wchar_t *)malloc(wcsnlen(name, 10) + 1);
  new_name = (wchar_t *)malloc(wcsnlen_s(name + 1, 10));
  // CHECK-MESSAGES: :[[@LINE-1]]:25: warning: addition operator is applied to the argument of wcsnlen_s
  // CHECK-FIXES: new_name = (wchar_t *)malloc(wcsnlen_s(name, 10) + 1);
}
// The check also covers alloca.
void bad_alloca(char *name) {
  char *new_name = (char *)alloca(strlen(name + 1));
  // CHECK-MESSAGES: :[[@LINE-1]]:28: warning: addition operator is applied to the argument of strlen
  // CHECK-FIXES: char *new_name = (char *)alloca(strlen(name) + 1);
}
// The check also covers the element-size argument of calloc.
void bad_calloc(char *name) {
  char *new_names = (char *)calloc(2, strlen(name + 1));
  // CHECK-MESSAGES: :[[@LINE-1]]:29: warning: addition operator is applied to the argument of strlen
  // CHECK-FIXES: char *new_names = (char *)calloc(2, strlen(name) + 1);
}
// The check also covers the size argument of realloc.
void bad_realloc(char *old_name, char *name) {
  char *new_name = (char *)realloc(old_name, strlen(name + 1));
  // CHECK-MESSAGES: :[[@LINE-1]]:28: warning: addition operator is applied to the argument of strlen
  // CHECK-FIXES: char *new_name = (char *)realloc(old_name, strlen(name) + 1);
}
// The cases below must stay undiagnosed.
void intentional1(char *name) {
  char *new_name = (char *)malloc(strlen(name + 1) + 1);
  // CHECK-MESSAGES-NOT: :[[@LINE-1]]:28: warning: addition operator is applied to the argument of strlen
  // We have + 1 outside as well so we assume this is intentional
}
// Undiagnosed: additions other than one are assumed intentional.
void intentional2(char *name) {
  char *new_name = (char *)malloc(strlen(name + 2));
  // CHECK-MESSAGES-NOT: :[[@LINE-1]]:28: warning: addition operator is applied to the argument of strlen
  // Only give warning for + 1, not + 2
}
// Undiagnosed: extra parentheses signal intent.
void intentional3(char *name) {
  char *new_name = (char *)malloc(strlen((name + 1)));
  // CHECK-MESSAGES-NOT: :[[@LINE-1]]:28: warning: addition operator is applied to the argument of strlen
  // If expression is in extra parentheses, consider it as intentional
}
// Allocation through a function pointer initialized with an allocator is
// also diagnosed.
void (*(*const alloc_ptr)(size_t)) = malloc;
void bad_indirect_alloc(char *name) {
  char *new_name = (char *)alloc_ptr(strlen(name + 1));
  // CHECK-MESSAGES: :[[@LINE-1]]:28: warning: addition operator is applied to the argument of strlen
  // CHECK-FIXES: char *new_name = (char *)alloc_ptr(strlen(name) + 1);
}
# frozen_string_literal: true
# :markup: markdown
module ActionCable
  module Server
    class Worker
      # Wraps each unit of worker work so that Active Record log output is
      # tagged with the Action Cable connection's log tags. Only active when
      # Active Record is loaded.
      module ActiveRecordConnectionManagement
        extend ActiveSupport::Concern

        included do
          # Install the callback lazily: Action Cable itself does not depend
          # on Active Record, so skip the hook when it is absent.
          if defined?(ActiveRecord::Base)
            set_callback :work, :around, :with_database_connections
          end
        end

        # Tags ActiveRecord::Base.logger with the connection's log tags for
        # the duration of the given block.
        def with_database_connections(&block)
          connection.logger.tag(ActiveRecord::Base.logger, &block)
        end
      end
    end
  end
end
// Copyright 2017 The Cockroach Authors.
//
// Use of this software is governed by the CockroachDB Software License
// included in the /LICENSE file.
package sql
import (
"context"
"github.com/cockroachdb/cockroach/pkg/keys"
"github.com/cockroachdb/cockroach/pkg/kv"
"github.com/cockroachdb/cockroach/pkg/security/username"
"github.com/cockroachdb/cockroach/pkg/settings/cluster"
"github.com/cockroachdb/cockroach/pkg/sql/catalog/catpb"
"github.com/cockroachdb/cockroach/pkg/sql/catalog/dbdesc"
"github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb"
"github.com/cockroachdb/cockroach/pkg/sql/catalog/descs"
"github.com/cockroachdb/cockroach/pkg/sql/catalog/schemadesc"
"github.com/cockroachdb/cockroach/pkg/sql/catalog/tabledesc"
"github.com/cockroachdb/cockroach/pkg/sql/clusterunique"
"github.com/cockroachdb/cockroach/pkg/sql/parser"
"github.com/cockroachdb/cockroach/pkg/sql/parser/statements"
"github.com/cockroachdb/cockroach/pkg/sql/sem/eval"
"github.com/cockroachdb/cockroach/pkg/sql/sem/tree"
"github.com/cockroachdb/cockroach/pkg/sql/sessiondata"
"github.com/cockroachdb/cockroach/pkg/sql/sessiondatapb"
"github.com/cockroachdb/cockroach/pkg/util/hlc"
"github.com/cockroachdb/errors"
)
// CreateTestTableDescriptor converts a SQL string to a table for test purposes.
// Will fail on complex tables where that operation requires e.g. looking up
// other tables.
//
// parentID and id are the IDs assigned to the (synthesized) parent database
// and the new descriptor; privileges becomes the descriptor's privilege
// descriptor. txn and collection back the schema resolver used while building
// the descriptor.
func CreateTestTableDescriptor(
	ctx context.Context,
	parentID, id descpb.ID,
	schema string,
	privileges *catpb.PrivilegeDescriptor,
	txn *kv.Txn,
	collection *descs.Collection,
) (*tabledesc.Mutable, error) {
	st := cluster.MakeTestingClusterSettings()
	stmt, err := parser.ParseOne(schema)
	if err != nil {
		return nil, err
	}
	semaCtx := tree.MakeSemaContext(nil /* resolver */)
	evalCtx := eval.MakeTestingEvalContext(st)
	// UNIQUE WITHOUT INDEX constraints are opt-in; enable them so test
	// schemas may use them.
	sessionData := &sessiondata.SessionData{
		LocalOnlySessionData: sessiondatapb.LocalOnlySessionData{
			EnableUniqueWithoutIndexConstraints: true,
		},
	}
	// Only CREATE TABLE and CREATE SEQUENCE statements are supported.
	switch n := stmt.AST.(type) {
	case *tree.CreateTable:
		// Synthesize a parent database named "test" owned by root.
		db := dbdesc.NewInitial(parentID, "test", username.RootUserName())
		desc, err := NewTableDesc(
			ctx,
			nil, /* txn */
			NewSkippingCacheSchemaResolver(collection, sessiondata.NewStack(sessionData), txn, nil),
			st,
			n,
			db,
			schemadesc.GetPublicSchema(),
			id,
			nil,             /* regionConfig */
			hlc.Timestamp{}, /* creationTime */
			privileges,
			make(map[descpb.ID]*tabledesc.Mutable),
			&semaCtx,
			&evalCtx,
			sessionData,
			tree.PersistencePermanent,
			nil, /* colToSequenceRefs */
		)
		return desc, err
	case *tree.CreateSequence:
		desc, err := NewSequenceTableDesc(
			ctx,
			nil, /* planner */
			st,
			n.Name.Table(),
			n.Options,
			parentID, keys.PublicSchemaID, id,
			hlc.Timestamp{}, /* creationTime */
			privileges,
			tree.PersistencePermanent,
			false, /* isMultiRegion */
		)
		return desc, err
	default:
		return nil, errors.Errorf("unexpected AST %T", stmt.AST)
	}
}
// StmtBufReader is an exported interface for reading a StmtBuf.
// Normally only the write interface of the buffer is exported, as it is used by
// the pgwire.
type StmtBufReader struct {
	buf *StmtBuf // the underlying command buffer being consumed
}
// MakeStmtBufReader creates a StmtBufReader wrapping the given buffer for
// read-side access.
func MakeStmtBufReader(buf *StmtBuf) StmtBufReader {
	return StmtBufReader{buf: buf}
}
// CurCmd returns the current command in the buffer. The position component
// of the underlying buffer's result is discarded; the reader advances via
// AdvanceOne instead.
func (r StmtBufReader) CurCmd() (Command, error) {
	cmd, _ /* pos */, err := r.buf.CurCmd()
	return cmd, err
}
// AdvanceOne moves the cursor one position over, delegating to the
// underlying buffer.
func (r *StmtBufReader) AdvanceOne() {
	r.buf.AdvanceOne()
}
// Exec is a test utility function that takes a localPlanner (of type
// interface{} so that external packages can call NewInternalPlanner and pass
// the result) and executes a sql statement through the DistSQLPlanner.
//
// Result rows are discarded; only the execution error (if any) is returned.
// When distribute is true the plan runs with full distribution, otherwise
// locally.
func (dsp *DistSQLPlanner) Exec(
	ctx context.Context,
	localPlanner interface{},
	stmt statements.Statement[tree.Statement],
	distribute bool,
) error {
	p := localPlanner.(*planner)
	// Wrap the parsed statement; tests need no query ID or statement hints.
	p.stmt = makeStatement(
		ctx, stmt, clusterunique.ID{}, /* queryID */
		tree.FmtFlags(tree.QueryFormattingForFingerprintsMask.Get(&p.execCfg.Settings.SV)),
		nil, /* statementHintsCache */
	)
	if err := p.makeOptimizerPlan(ctx); err != nil {
		return err
	}
	defer p.curPlan.close(ctx)
	// Discard all result rows; any error is surfaced through rw.Err().
	rw := NewCallbackResultWriter(func(ctx context.Context, row tree.Datums) error {
		return nil
	})
	execCfg := p.ExecCfg()
	recv := MakeDistSQLReceiver(
		ctx,
		rw,
		stmt.AST.StatementReturnType(),
		execCfg.RangeDescriptorCache,
		p.txn,
		execCfg.Clock,
		p.ExtendedEvalContext().Tracing,
	)
	defer recv.Release()
	distributionType := DistributionType(LocalDistribution)
	if distribute {
		distributionType = FullDistribution
	}
	evalCtx := p.ExtendedEvalContext()
	planCtx := execCfg.DistSQLPlanner.NewPlanningCtx(ctx, evalCtx, p, p.txn,
		distributionType)
	planCtx.stmtType = recv.stmtType
	dsp.PlanAndRun(ctx, evalCtx, planCtx, p.txn, p.curPlan.main, recv, nil /* finishedSetupFn */)
	return rw.Err()
}
/*
* Copyright 2012-present the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.boot.build.properties;
import java.util.Locale;
/**
* The type of build being performed.
*
* @author Phillip Webb
*/
public enum BuildType {

	/**
	 * An open source build.
	 */
	OPEN_SOURCE,

	/**
	 * A commercial build.
	 */
	COMMERCIAL;

	/**
	 * Return this build type as a lower-case identifier with the underscore
	 * separator removed (for example {@code opensource}).
	 * @return the identifier form of this build type
	 */
	public String toIdentifier() {
		// Locale.ROOT keeps the conversion locale-insensitive; "_" has no
		// case so removing it before or after lower-casing is equivalent.
		return toString().toLowerCase(Locale.ROOT).replace("_", "");
	}

}
#!/usr/bin/env python3
# ====================================
# Copyright (c) Microsoft Corporation. All rights reserved.
# ====================================
import os
import sys
from optparse import OptionParser
import grp
import pwd
import util
# append worker binary source path
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
util.add_all_packages_under_automationworker_to_sys_path()
from worker import configuration3 as configuration
from worker import serializerfactory
from worker import linuxutil
from worker import diydirs
# JSON serializer matching the running Python version.
json = serializerfactory.get_serializer(sys.version_info)

# Build a minimal worker configuration: OMSUtil only needs the proxy
# configuration path and a scratch working directory.
configuration.clear_config()
configuration.set_config({configuration.PROXY_CONFIGURATION_PATH: "/etc/opt/microsoft/omsagent/proxy.conf",
                          configuration.WORKER_VERSION: "OMSUtil",
                          configuration.WORKING_DIRECTORY_PATH: "/tmp"})

# Account and group names used by the initialization steps below.
USERNAME_NXAUTOMATION = "nxautomation"
GROUPNAME_NXAUTOMATION = "nxautomation"
GROUPNAME_OMSAGENT = "omsagent"
def _run_privileged(command, failure_message, success_message):
    """Run *command* (a sudo argv list) and report the outcome.

    Args:
        command         : list, argv passed to linuxutil.popen_communicate.
        failure_message : str, prefix of the exception raised on failure.
        success_message : str, printed when the command succeeds.

    Raises:
        Exception : if the command exits non-zero; decoded stderr output is
            appended to *failure_message*.
    """
    process, output, error = linuxutil.popen_communicate(command)
    # stderr may come back as bytes depending on the helper/Python version
    error = error.decode() if isinstance(error, bytes) else error
    if process.returncode != 0:
        raise Exception(failure_message + " Error: " + str(error))
    print(success_message)


def initialize():
    """Initializes the OMS environment. Meant to be executed everytime the resource's set method is invoked.

    Steps:
    - Sets omsagent group to nxautomation user (if needed).
    - Sets group read permission to MSFT keyring.gpg
    - Sets group read and execute to the OMS certificate folder.
    - Sets ownership and permissions of the worker working directory.
    - Sets group read permission on any proxy configuration file present.
    - Creates the persistent DIY directories.

    Raises:
        Exception : if any of the privileged commands fails.
    """
    # add nxautomation to omsagent group; the usermod path is only taken when
    # running as the nxautomation user itself
    nxautomation_uid = int(pwd.getpwnam(USERNAME_NXAUTOMATION).pw_uid)
    if os.getuid() == nxautomation_uid:
        omsagent_group = grp.getgrnam(GROUPNAME_OMSAGENT)
        if USERNAME_NXAUTOMATION not in omsagent_group.gr_mem:
            process, output, error = linuxutil.popen_communicate(
                ["sudo", "/usr/sbin/usermod", "-g", "nxautomation",
                 "-a", "-G", "omsagent,omiusers", "nxautomation"])
            if process.returncode != 0:
                # try again with -A instead of -a for SUSE Linux
                process, output, error = linuxutil.popen_communicate(
                    ["sudo", "/usr/sbin/usermod", "-g", "nxautomation",
                     "-A", "omsagent,omiusers", "nxautomation"])
                error = error.decode() if isinstance(error, bytes) else error
                if process.returncode != 0:
                    raise Exception("Unable to add nxautomation to omsagent group. Error: " + str(error))
            print("Successfully added omsagent secondary group to nxautomation user.")

    # change permissions for the keyring.gpg
    _run_privileged(["sudo", "chmod", "g+r", "/etc/opt/omi/conf/omsconfig/keyring.gpg"],
                    "Unable set group permission to keyring.",
                    "Successfully set group permissions to keyring.gpg.")

    # change permission for the certificate folder, oms.crt and oms.key
    _run_privileged(["sudo", "chmod", "g+rx", "-R", "/etc/opt/microsoft/omsagent/certs"],
                    "Unable set group permissions to certificate folder.",
                    "Successfully set group permissions to certificate folder.")

    # change owner for the worker working directory
    _run_privileged(["sudo", "chown", "nxautomation:omiusers", "-R",
                     "/var/opt/microsoft/omsagent/run/automationworker"],
                    "Unable set group owner on working directory.",
                    "Successfully set group permissions on working directory.")

    # change permission for the worker working directory
    _run_privileged(["sudo", "chmod", "gu=rwx", "-R",
                     "/var/opt/microsoft/omsagent/run/automationworker"],
                    "Unable set permissions on working directory.",
                    "Successfully set permissions on working directory.")

    # explicitly prevent others from accessing the worker working directory
    _run_privileged(["sudo", "chmod", "o=", "-R",
                     "/var/opt/microsoft/omsagent/run/automationworker"],
                    "Unable set permissions on working directory.",
                    "Successfully set permissions on working directory.")

    # give the group read access to whichever proxy configuration exists
    proxy_paths = ["/etc/opt/microsoft/omsagent/conf/proxy.conf", "/etc/opt/microsoft/omsagent/proxy.conf"]
    for path in proxy_paths:
        if os.path.exists(path):
            _run_privileged(["sudo", "chmod", "g+r", path],
                            "Unable set read permission to proxy configuration file.",
                            "Successfully set read permission to proxy configuration file.")

    # create home dir for nxautomation
    diydirs.create_persistent_diy_dirs()
def dmidecode():
    """Print the output of dmidecode (SMBIOS/hardware information)."""
    print(linuxutil.invoke_dmidecode())
def main():
    """Entry point: parse command-line options and dispatch the action.

    Supported options:
        --initialize : set up OMS groups/permissions for automation.
        --dmidecode  : print the host's dmidecode output.

    Raises:
        Exception : when not running as the nxautomation user, or when no
            option was specified.
    """
    parser = OptionParser(usage="usage: %prog [--initialize, --dmidecode]",
                          version="%prog " + str(configuration.get_worker_version()))
    parser.add_option("--initialize", action="store_true", dest="initialize", default=False)
    parser.add_option("--dmidecode", action="store_true", dest="dmidecode", default=False)
    (options, args) = parser.parse_args()

    # This utility performs privileged operations tailored to the
    # nxautomation account; refuse to run as anyone else.
    nxautomation_uid = int(pwd.getpwnam("nxautomation").pw_uid)
    if os.getuid() != nxautomation_uid:
        raise Exception("OMSUtil can only be ran as nxautomation user.")

    if options.initialize is True:
        initialize()
    elif options.dmidecode is True:
        dmidecode()
    else:
        raise Exception("No option specified.")


if __name__ == "__main__":
    main()
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.