| code | language | source | repo | path |
|---|---|---|---|---|
# -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This package defines the CGS units. They are also available in the
top-level `astropy.units` namespace.
"""
from fractions import Fraction
from . import si
from .core import UnitBase, def_unit
_ns = globals()
def_unit(['cm', 'centimeter'], si.cm, namespace=_ns, prefixes=False)
g = si.g
s = si.s
C = si.C
rad = si.rad
sr = si.sr
cd = si.cd
K = si.K
deg_C = si.deg_C
mol = si.mol
##########################################################################
# ACCELERATION
def_unit(['Gal', 'gal'], cm / s ** 2, namespace=_ns, prefixes=True,
doc="Gal: CGS unit of acceleration")
##########################################################################
# ENERGY
# Use CGS definition of erg
def_unit(['erg'], g * cm ** 2 / s ** 2, namespace=_ns, prefixes=True,
doc="erg: CGS unit of energy")
##########################################################################
# FORCE
def_unit(['dyn', 'dyne'], g * cm / s ** 2, namespace=_ns,
prefixes=True,
doc="dyne: CGS unit of force")
##########################################################################
# PRESSURE
def_unit(['Ba', 'Barye', 'barye'], g / (cm * s ** 2), namespace=_ns,
prefixes=True,
doc="Barye: CGS unit of pressure")
##########################################################################
# DYNAMIC VISCOSITY
def_unit(['P', 'poise'], g / (cm * s), namespace=_ns,
prefixes=True,
doc="poise: CGS unit of dynamic viscosity")
##########################################################################
# KINEMATIC VISCOSITY
def_unit(['St', 'stokes'], cm ** 2 / s, namespace=_ns,
prefixes=True,
doc="stokes: CGS unit of kinematic viscosity")
##########################################################################
# WAVENUMBER
def_unit(['k', 'Kayser', 'kayser'], cm ** -1, namespace=_ns,
prefixes=True,
doc="kayser: CGS unit of wavenumber")
###########################################################################
# ELECTRICAL
def_unit(['D', 'Debye', 'debye'], Fraction(1, 3) * 1e-29 * C * si.m,
namespace=_ns, prefixes=True,
doc="Debye: CGS unit of electric dipole moment")
def_unit(['Fr', 'Franklin', 'statcoulomb', 'statC', 'esu'],
g ** Fraction(1, 2) * cm ** Fraction(3, 2) * s ** -1,
namespace=_ns,
doc='Franklin: CGS (ESU) unit of charge')
def_unit(['statA', 'statampere'], Fr * s ** -1, namespace=_ns,
doc='statampere: CGS (ESU) unit of current')
def_unit(['Bi', 'Biot', 'abA', 'abampere'],
g ** Fraction(1, 2) * cm ** Fraction(1, 2) * s ** -1, namespace=_ns,
doc='Biot: CGS (EMU) unit of current')
def_unit(['abC', 'abcoulomb'], Bi * s, namespace=_ns,
doc='abcoulomb: CGS (EMU) of charge')
###########################################################################
# MAGNETIC
def_unit(['G', 'Gauss', 'gauss'], 1e-4 * si.T, namespace=_ns, prefixes=True,
doc="Gauss: CGS unit for magnetic field")
###########################################################################
# BASES
bases = set([cm, g, s, rad, cd, K, mol])
###########################################################################
# CLEANUP
del UnitBase
del def_unit
del si
del Fraction
###########################################################################
# DOCSTRING
# This generates a docstring for this module that describes all of the
# standard units defined here.
from .utils import generate_unit_summary as _generate_unit_summary
if __doc__ is not None:
__doc__ += _generate_unit_summary(globals()) | unknown | codeparrot/codeparrot-clean | ||
import logging
import os
import threading
import time
from fs.osfs import OSFS
from smartfile.errors import RequestError, ResponseError
import common
from definitions import FileDefinition, LocalDefinitionHelper
from errors import FileNotAvailableError, FileDeletedError, MaxTriesError
from errors import UploadError
from worker import Worker
log = logging.getLogger(__name__)
class Uploader(Worker):
def __init__(self, api, sync_dir):
self._api = api
self._sync_dir = sync_dir
self._timeoffset = common.calculate_time_offset()
self._syncFS = OSFS(sync_dir)
def _process_task(self, task):
# Check if the task is already a file definition
if not isinstance(task, FileDefinition):
helper = LocalDefinitionHelper(task.path, self._syncFS)
try:
task = helper.create_definition()
except WindowsError, err:
raise FileDeletedError(err)
# Create a system specific path relative to the sync dir
basepath = os.path.normpath(task.path)
if basepath.startswith("/"):
basepath = basepath.strip("/")
if basepath.startswith('\\'):
basepath = basepath.lstrip('\\')
# Full system path
absolute_path = os.path.join(self._sync_dir, basepath)
# If the task is a file
if not os.path.isdir(absolute_path):
basepath = basepath.replace('\\', '/')
if not basepath.startswith("/"):
basepath = os.path.join("/", basepath)
task_directory = os.path.dirname(basepath)
api_path = "/path/data%s" % basepath
api_path_base = os.path.dirname(api_path)
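# Illustrative walk-through with hypothetical values (not in the original
# source): for a task path of "photos\\cat.jpg", basepath becomes
# "/photos/cat.jpg", task_directory "/photos", api_path
# "/path/data/photos/cat.jpg" and api_path_base "/path/data/photos".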
try:
# create the directory to make sure it exists
self._api.post('/path/oper/mkdir/', path=task_directory)
# upload the file
self._api.post(api_path_base, file=file(absolute_path, 'rb'))
# set the new attributes
except IOError, err:
if err.errno == 2:
raise FileNotAvailableError(err)
except ResponseError, err:
if err.status_code == 404:
# If the file becomes suddenly not available, just ignore
# trying to set its attributes.
pass
elif err.status_code == 409:
# Conflict - Can only upload to an existing directory.
raise UploadError(err)
except RequestError, err:
if err.detail.startswith('HTTPConnectionPool'):
raise MaxTriesError(err)
else:
self._set_attributes(task)
else:
# If the task path is a folder
task_directory = basepath
if not task_directory.startswith("/"):
task_directory = os.path.join("/", task_directory)
task_directory = task_directory.replace('\\', '/')
try:
self._api.post('/path/oper/mkdir/', path=task_directory)
except RequestError, err:
raise MaxTriesError(err)
except Exception, err:
raise UploadError(err)
return task
def _set_attributes(self, task):
checksum = task.checksum
modified = task.modified.replace(microsecond=0)
checksum_string = "checksum=%s" % checksum
modified_string = "modified=%s" % modified
apiPath = "/path/info%s" % task.path
try:
self.__set_attributes(apiPath, checksum_string, modified_string)
except ResponseError, err:
if err.status_code == 404:
"""
If we try setting attributes to a file too soon, SmartFile
gives us an error, so sleep the thread for a bit.
"""
time.sleep(1)
# Now try setting the attributes again
self.__set_attributes(apiPath, checksum_string,
modified_string)
elif err.status_code == 500:
self.__set_attributes(apiPath, checksum_string,
modified_string)
def __set_attributes(self, api_path, checksum_string, modified_string):
request_properties = [checksum_string, modified_string]
try:
self._api.post(api_path, attributes=request_properties)
except ResponseError, err:
if err.status_code == 404:
# If the file becomes suddenly not available, just ignore
# trying to set its attributes
pass
if err.status_code == 500:
# Ignore server errors since they shouldn't happen anyway
pass
except RequestError, err:
if err.detail.startswith('HTTPConnectionPool'):
raise MaxTriesError(err)
class UploadWorker(threading.Thread):
def __init__(self, queue, api, sync_dir, remote_files, realtime=False):
threading.Thread.__init__(self)
self._uploader = Uploader(api, sync_dir)
self._queue = queue
self._remote_files = remote_files
self._realtime = realtime
def run(self):
while True:
log.debug("Getting a new task.")
self._current_task = None
self._current_task = self._queue.get()
try:
log.debug("Processing: " + self._current_task.path)
result = self._uploader.process_task(self._current_task)
# Update the remote files dictionary to reflect the new file
self._remote_files[result.path] = result
except FileNotAvailableError:
# The file was not available when uploading it
log.warning("File is not yet available: " +
self._current_task.path)
self.try_task_later()
except MaxTriesError:
log.warning("Connection error occured while uploading: " +
self._current_task.path)
self.try_task_later()
except UploadError:
log.warning("Folders were not created properly for: " +
self._current_task.path)
self.try_task_later()
except FileDeletedError:
log.warning("The file was deleted before trying to upload:" +
self._current_task.path)
else:
# Notify the realtime messaging system of the upload
if self._realtime:
log.debug("Sending an update message about: " +
self._current_task.path)
self._realtime.update(self._current_task)
log.debug("Task complete.")
self._queue.task_done()
def try_task_later(self):
self._queue.put(self._current_task)
def cancel(self):
log.debug("Task cancelled: " + self._current_task.path)
self._uploader.cancel_task()
@property
def current_task(self):
return self._current_task | unknown | codeparrot/codeparrot-clean | ||
/*-------------------------------------------------------------------------
*
* nodeBitmapIndexscan.c
* Routines to support bitmapped index scans of relations
*
* Portions Copyright (c) 1996-2026, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
*
* IDENTIFICATION
* src/backend/executor/nodeBitmapIndexscan.c
*
*-------------------------------------------------------------------------
*/
/*
* INTERFACE ROUTINES
* MultiExecBitmapIndexScan scans a relation using index.
* ExecInitBitmapIndexScan creates and initializes state info.
* ExecReScanBitmapIndexScan prepares to rescan the plan.
* ExecEndBitmapIndexScan releases all storage.
*/
#include "postgres.h"
#include "access/genam.h"
#include "executor/executor.h"
#include "executor/nodeBitmapIndexscan.h"
#include "executor/nodeIndexscan.h"
#include "miscadmin.h"
/* ----------------------------------------------------------------
* ExecBitmapIndexScan
*
* stub for pro forma compliance
* ----------------------------------------------------------------
*/
static TupleTableSlot *
ExecBitmapIndexScan(PlanState *pstate)
{
elog(ERROR, "BitmapIndexScan node does not support ExecProcNode call convention");
return NULL;
}
/* ----------------------------------------------------------------
* MultiExecBitmapIndexScan(node)
* ----------------------------------------------------------------
*/
Node *
MultiExecBitmapIndexScan(BitmapIndexScanState *node)
{
TIDBitmap *tbm;
IndexScanDesc scandesc;
double nTuples = 0;
bool doscan;
/* must provide our own instrumentation support */
if (node->ss.ps.instrument)
InstrStartNode(node->ss.ps.instrument);
/*
* extract necessary information from index scan node
*/
scandesc = node->biss_ScanDesc;
/*
* If we have runtime keys and they've not already been set up, do it now.
* Array keys are also treated as runtime keys; note that if ExecReScan
* returns with biss_RuntimeKeysReady still false, then there is an empty
* array key so we should do nothing.
*/
if (!node->biss_RuntimeKeysReady &&
(node->biss_NumRuntimeKeys != 0 || node->biss_NumArrayKeys != 0))
{
ExecReScan((PlanState *) node);
doscan = node->biss_RuntimeKeysReady;
}
else
doscan = true;
/*
* Prepare the result bitmap. Normally we just create a new one to pass
* back; however, our parent node is allowed to store a pre-made one into
* node->biss_result, in which case we just OR our tuple IDs into the
* existing bitmap. (This saves needing explicit UNION steps.)
*/
if (node->biss_result)
{
tbm = node->biss_result;
node->biss_result = NULL; /* reset for next time */
}
else
{
/* XXX should we use less than work_mem for this? */
tbm = tbm_create(work_mem * (Size) 1024,
((BitmapIndexScan *) node->ss.ps.plan)->isshared ?
node->ss.ps.state->es_query_dsa : NULL);
}
/*
* Get TIDs from index and insert into bitmap
*/
while (doscan)
{
nTuples += (double) index_getbitmap(scandesc, tbm);
CHECK_FOR_INTERRUPTS();
doscan = ExecIndexAdvanceArrayKeys(node->biss_ArrayKeys,
node->biss_NumArrayKeys);
if (doscan) /* reset index scan */
index_rescan(node->biss_ScanDesc,
node->biss_ScanKeys, node->biss_NumScanKeys,
NULL, 0);
}
/* must provide our own instrumentation support */
if (node->ss.ps.instrument)
InstrStopNode(node->ss.ps.instrument, nTuples);
return (Node *) tbm;
}
/* ----------------------------------------------------------------
* ExecReScanBitmapIndexScan(node)
*
* Recalculates the values of any scan keys whose value depends on
* information known at runtime, then rescans the indexed relation.
* ----------------------------------------------------------------
*/
void
ExecReScanBitmapIndexScan(BitmapIndexScanState *node)
{
ExprContext *econtext = node->biss_RuntimeContext;
/*
* Reset the runtime-key context so we don't leak memory as each outer
* tuple is scanned. Note this assumes that we will recalculate *all*
* runtime keys on each call.
*/
if (econtext)
ResetExprContext(econtext);
/*
* If we are doing runtime key calculations (ie, any of the index key
* values weren't simple Consts), compute the new key values.
*
* Array keys are also treated as runtime keys; note that if we return
* with biss_RuntimeKeysReady still false, then there is an empty array
* key so no index scan is needed.
*/
if (node->biss_NumRuntimeKeys != 0)
ExecIndexEvalRuntimeKeys(econtext,
node->biss_RuntimeKeys,
node->biss_NumRuntimeKeys);
if (node->biss_NumArrayKeys != 0)
node->biss_RuntimeKeysReady =
ExecIndexEvalArrayKeys(econtext,
node->biss_ArrayKeys,
node->biss_NumArrayKeys);
else
node->biss_RuntimeKeysReady = true;
/* reset index scan */
if (node->biss_RuntimeKeysReady)
index_rescan(node->biss_ScanDesc,
node->biss_ScanKeys, node->biss_NumScanKeys,
NULL, 0);
}
/* ----------------------------------------------------------------
* ExecEndBitmapIndexScan
* ----------------------------------------------------------------
*/
void
ExecEndBitmapIndexScan(BitmapIndexScanState *node)
{
Relation indexRelationDesc;
IndexScanDesc indexScanDesc;
/*
* extract information from the node
*/
indexRelationDesc = node->biss_RelationDesc;
indexScanDesc = node->biss_ScanDesc;
/*
* When ending a parallel worker, copy the statistics gathered by the
* worker back into shared memory so that it can be picked up by the main
* process to report in EXPLAIN ANALYZE
*/
if (node->biss_SharedInfo != NULL && IsParallelWorker())
{
IndexScanInstrumentation *winstrument;
Assert(ParallelWorkerNumber <= node->biss_SharedInfo->num_workers);
winstrument = &node->biss_SharedInfo->winstrument[ParallelWorkerNumber];
/*
* We have to accumulate the stats rather than performing a memcpy.
* When a Gather/GatherMerge node finishes it will perform planner
* shutdown on the workers. On rescan it will spin up new workers
* which will have a new BitmapIndexScanState and zeroed stats.
*/
winstrument->nsearches += node->biss_Instrument.nsearches;
}
/*
* close the index relation (no-op if we didn't open it)
*/
if (indexScanDesc)
index_endscan(indexScanDesc);
if (indexRelationDesc)
index_close(indexRelationDesc, NoLock);
}
/* ----------------------------------------------------------------
* ExecInitBitmapIndexScan
*
* Initializes the index scan's state information.
* ----------------------------------------------------------------
*/
BitmapIndexScanState *
ExecInitBitmapIndexScan(BitmapIndexScan *node, EState *estate, int eflags)
{
BitmapIndexScanState *indexstate;
LOCKMODE lockmode;
/* check for unsupported flags */
Assert(!(eflags & (EXEC_FLAG_BACKWARD | EXEC_FLAG_MARK)));
/*
* create state structure
*/
indexstate = makeNode(BitmapIndexScanState);
indexstate->ss.ps.plan = (Plan *) node;
indexstate->ss.ps.state = estate;
indexstate->ss.ps.ExecProcNode = ExecBitmapIndexScan;
/* normally we don't make the result bitmap till runtime */
indexstate->biss_result = NULL;
/*
* We do not open or lock the base relation here. We assume that an
* ancestor BitmapHeapScan node is holding AccessShareLock (or better) on
* the heap relation throughout the execution of the plan tree.
*/
indexstate->ss.ss_currentRelation = NULL;
indexstate->ss.ss_currentScanDesc = NULL;
/*
* Miscellaneous initialization
*
* We do not need a standard exprcontext for this node, though we may
* decide below to create a runtime-key exprcontext
*/
/*
* initialize child expressions
*
* We don't need to initialize targetlist or qual since neither are used.
*
* Note: we don't initialize all of the indexqual expression, only the
* sub-parts corresponding to runtime keys (see below).
*/
/*
* If we are just doing EXPLAIN (ie, aren't going to run the plan), stop
* here. This allows an index-advisor plugin to EXPLAIN a plan containing
* references to nonexistent indexes.
*/
if (eflags & EXEC_FLAG_EXPLAIN_ONLY)
return indexstate;
/* Open the index relation. */
lockmode = exec_rt_fetch(node->scan.scanrelid, estate)->rellockmode;
indexstate->biss_RelationDesc = index_open(node->indexid, lockmode);
/*
* Initialize index-specific scan state
*/
indexstate->biss_RuntimeKeysReady = false;
indexstate->biss_RuntimeKeys = NULL;
indexstate->biss_NumRuntimeKeys = 0;
/*
* build the index scan keys from the index qualification
*/
ExecIndexBuildScanKeys((PlanState *) indexstate,
indexstate->biss_RelationDesc,
node->indexqual,
false,
&indexstate->biss_ScanKeys,
&indexstate->biss_NumScanKeys,
&indexstate->biss_RuntimeKeys,
&indexstate->biss_NumRuntimeKeys,
&indexstate->biss_ArrayKeys,
&indexstate->biss_NumArrayKeys);
/*
* If we have runtime keys or array keys, we need an ExprContext to
* evaluate them. We could just create a "standard" plan node exprcontext,
* but to keep the code looking similar to nodeIndexscan.c, it seems
* better to stick with the approach of using a separate ExprContext.
*/
if (indexstate->biss_NumRuntimeKeys != 0 ||
indexstate->biss_NumArrayKeys != 0)
{
ExprContext *stdecontext = indexstate->ss.ps.ps_ExprContext;
ExecAssignExprContext(estate, &indexstate->ss.ps);
indexstate->biss_RuntimeContext = indexstate->ss.ps.ps_ExprContext;
indexstate->ss.ps.ps_ExprContext = stdecontext;
}
else
{
indexstate->biss_RuntimeContext = NULL;
}
/*
* Initialize scan descriptor.
*/
indexstate->biss_ScanDesc =
index_beginscan_bitmap(indexstate->biss_RelationDesc,
estate->es_snapshot,
&indexstate->biss_Instrument,
indexstate->biss_NumScanKeys);
/*
* If no run-time keys to calculate, go ahead and pass the scankeys to the
* index AM.
*/
if (indexstate->biss_NumRuntimeKeys == 0 &&
indexstate->biss_NumArrayKeys == 0)
index_rescan(indexstate->biss_ScanDesc,
indexstate->biss_ScanKeys, indexstate->biss_NumScanKeys,
NULL, 0);
/*
* all done.
*/
return indexstate;
}
/* ----------------------------------------------------------------
* ExecBitmapIndexScanEstimate
*
* Compute the amount of space we'll need in the parallel
* query DSM, and inform pcxt->estimator about our needs.
* ----------------------------------------------------------------
*/
void
ExecBitmapIndexScanEstimate(BitmapIndexScanState *node, ParallelContext *pcxt)
{
Size size;
/*
* Parallel bitmap index scans are not supported, but we still need to
* store the scan's instrumentation in DSM during parallel query
*/
if (!node->ss.ps.instrument || pcxt->nworkers == 0)
return;
size = offsetof(SharedIndexScanInstrumentation, winstrument) +
pcxt->nworkers * sizeof(IndexScanInstrumentation);
shm_toc_estimate_chunk(&pcxt->estimator, size);
shm_toc_estimate_keys(&pcxt->estimator, 1);
}
/* ----------------------------------------------------------------
* ExecBitmapIndexScanInitializeDSM
*
* Set up bitmap index scan shared instrumentation.
* ----------------------------------------------------------------
*/
void
ExecBitmapIndexScanInitializeDSM(BitmapIndexScanState *node,
ParallelContext *pcxt)
{
Size size;
/* don't need this if not instrumenting or no workers */
if (!node->ss.ps.instrument || pcxt->nworkers == 0)
return;
size = offsetof(SharedIndexScanInstrumentation, winstrument) +
pcxt->nworkers * sizeof(IndexScanInstrumentation);
node->biss_SharedInfo =
(SharedIndexScanInstrumentation *) shm_toc_allocate(pcxt->toc,
size);
shm_toc_insert(pcxt->toc, node->ss.ps.plan->plan_node_id,
node->biss_SharedInfo);
/* Each per-worker area must start out as zeroes */
memset(node->biss_SharedInfo, 0, size);
node->biss_SharedInfo->num_workers = pcxt->nworkers;
}
/* ----------------------------------------------------------------
* ExecBitmapIndexScanInitializeWorker
*
* Copy relevant information from TOC into planstate.
* ----------------------------------------------------------------
*/
void
ExecBitmapIndexScanInitializeWorker(BitmapIndexScanState *node,
ParallelWorkerContext *pwcxt)
{
/* don't need this if not instrumenting */
if (!node->ss.ps.instrument)
return;
node->biss_SharedInfo = (SharedIndexScanInstrumentation *)
shm_toc_lookup(pwcxt->toc, node->ss.ps.plan->plan_node_id, false);
}
/* ----------------------------------------------------------------
* ExecBitmapIndexScanRetrieveInstrumentation
*
* Transfer bitmap index scan statistics from DSM to private memory.
* ----------------------------------------------------------------
*/
void
ExecBitmapIndexScanRetrieveInstrumentation(BitmapIndexScanState *node)
{
SharedIndexScanInstrumentation *SharedInfo = node->biss_SharedInfo;
size_t size;
if (SharedInfo == NULL)
return;
/* Create a copy of SharedInfo in backend-local memory */
size = offsetof(SharedIndexScanInstrumentation, winstrument) +
SharedInfo->num_workers * sizeof(IndexScanInstrumentation);
node->biss_SharedInfo = palloc(size);
memcpy(node->biss_SharedInfo, SharedInfo, size);
} | c | github | https://github.com/postgres/postgres | src/backend/executor/nodeBitmapIndexscan.c |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
from os import path
from setuptools import setup
from distutils.cmd import Command
NAME = 'redpipe'
ROOTDIR = path.abspath(os.path.dirname(__file__))
with open(os.path.join(ROOTDIR, 'README.rst')) as f:
readme = f.read()
with open(os.path.join(ROOTDIR, 'docs', 'release-notes.rst')) as f:
history = f.read()
with open(os.path.join(ROOTDIR, 'redpipe', 'VERSION')) as f:
version = str(f.read().strip())
class TestCommand(Command):
user_options = []
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
import sys
import subprocess
raise SystemExit(
subprocess.call([sys.executable, '-m', 'test']))
cmdclass = {'test': TestCommand}
ext_modules = []
setup(
name=NAME,
version=version,
description='Easy Redis pipelines',
author='John Loehrer',
author_email='72squared@gmail.com',
url='https://github.com/72squared/%s' % NAME,
download_url='https://github.com/72squared/%s/archive/%s.tar.gz' %
(NAME, version),
keywords='redis redis-pipeline orm database',
packages=[NAME],
classifiers=[
'Development Status :: 5 - Production/Stable',
'Programming Language :: Python',
'Programming Language :: Python :: Implementation :: CPython',
'Programming Language :: Python :: Implementation :: PyPy',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Environment :: Web Environment',
'Operating System :: POSIX'],
license='MIT',
install_requires=['redis>=2.10.2', 'six'],
tests_require=['redislite>=3.0.271', 'redis-py-cluster>=1.3.0'],
include_package_data=True,
long_description=readme + '\n\n' + history,
cmdclass=cmdclass,
ext_modules=ext_modules
) | unknown | codeparrot/codeparrot-clean | ||
from django.shortcuts import render
from django.template.response import TemplateResponse
from models import *
import random
# Create your views here.
def reverse_pitch(request):
# "Static" text here. OPTIONAL
intro = "US Ignite Will Convene Reverse Pitch Events in Nine Communities Throughout the United States "
desc = "From Arizona to Texas to Vermont, US Ignite will co-host and sponsor multiple reverse pitch events for technologists and entrepreneurs throughout February and March. This reverse pitch competition is for makers, developers and entrepreneurs developing ultra high-bandwidth hardware, software and applications that want to impact their local community. Entrepreneurs will hear pitches from civic organizations and then compete for part of a prize pool of cash and in-kind services. US Ignite will co-sponsor these events in nine of the organization's Smart Gigabit Communities, which are a network of communities nationwide that have each committed to leverage next-generation smart city and Internet technologies to keep pace with the world's rapidly changing, technology-driven economy."
random_int = random.uniform(0.1, 2.0)
pitch_list = Pitch.objects.filter(active=True).order_by('order').all()[:6]
context = {
'intro': intro,
'desc': desc,
'pitch_list': pitch_list,
'random_int': random_int
}
return TemplateResponse(request, 'smart_gigabit_communities/reverse_pitch.html', context) | unknown | codeparrot/codeparrot-clean | ||
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2015 Robert Jarzmik <robert.jarzmik@free.fr>
*
* Scatterlist splitting helpers.
*/
#include <linux/scatterlist.h>
#include <linux/slab.h>
struct sg_splitter {
struct scatterlist *in_sg0;
int nents;
off_t skip_sg0;
unsigned int length_last_sg;
struct scatterlist *out_sg;
};
static int sg_calculate_split(struct scatterlist *in, int nents, int nb_splits,
off_t skip, const size_t *sizes,
struct sg_splitter *splitters, bool mapped)
{
int i;
unsigned int sglen;
size_t size = sizes[0], len;
struct sg_splitter *curr = splitters;
struct scatterlist *sg;
for (i = 0; i < nb_splits; i++) {
splitters[i].in_sg0 = NULL;
splitters[i].nents = 0;
}
for_each_sg(in, sg, nents, i) {
sglen = mapped ? sg_dma_len(sg) : sg->length;
if (skip > sglen) {
skip -= sglen;
continue;
}
len = min_t(size_t, size, sglen - skip);
if (!curr->in_sg0) {
curr->in_sg0 = sg;
curr->skip_sg0 = skip;
}
size -= len;
curr->nents++;
curr->length_last_sg = len;
while (!size && (skip + len < sglen) && (--nb_splits > 0)) {
curr++;
size = *(++sizes);
skip += len;
len = min_t(size_t, size, sglen - skip);
curr->in_sg0 = sg;
curr->skip_sg0 = skip;
curr->nents = 1;
curr->length_last_sg = len;
size -= len;
}
skip = 0;
if (!size && --nb_splits > 0) {
curr++;
size = *(++sizes);
}
if (!nb_splits)
break;
}
return (size || !splitters[0].in_sg0) ? -EINVAL : 0;
}
static void sg_split_phys(struct sg_splitter *splitters, const int nb_splits)
{
int i, j;
struct scatterlist *in_sg, *out_sg;
struct sg_splitter *split;
for (i = 0, split = splitters; i < nb_splits; i++, split++) {
in_sg = split->in_sg0;
out_sg = split->out_sg;
for (j = 0; j < split->nents; j++, out_sg++) {
*out_sg = *in_sg;
if (!j) {
out_sg->offset += split->skip_sg0;
out_sg->length -= split->skip_sg0;
}
sg_dma_address(out_sg) = 0;
sg_dma_len(out_sg) = 0;
in_sg = sg_next(in_sg);
}
out_sg[-1].length = split->length_last_sg;
sg_mark_end(out_sg - 1);
}
}
static void sg_split_mapped(struct sg_splitter *splitters, const int nb_splits)
{
int i, j;
struct scatterlist *in_sg, *out_sg;
struct sg_splitter *split;
for (i = 0, split = splitters; i < nb_splits; i++, split++) {
in_sg = split->in_sg0;
out_sg = split->out_sg;
for (j = 0; j < split->nents; j++, out_sg++) {
sg_dma_address(out_sg) = sg_dma_address(in_sg);
sg_dma_len(out_sg) = sg_dma_len(in_sg);
if (!j) {
sg_dma_address(out_sg) += split->skip_sg0;
sg_dma_len(out_sg) -= split->skip_sg0;
}
in_sg = sg_next(in_sg);
}
sg_dma_len(--out_sg) = split->length_last_sg;
}
}
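/*
 * Illustrative call sketch (not part of the original source; names and
 * sizes are hypothetical): a caller splitting a DMA-mapped scatterlist
 * into two ranges might do something like:
 *
 *	size_t sizes[2] = { first_len, second_len };
 *	struct scatterlist *out[2];
 *	int out_nents[2];
 *	int ret;
 *
 *	ret = sg_split(sgl, mapped_nents, 0, 2, sizes, out, out_nents,
 *		       GFP_KERNEL);
 *	if (!ret) {
 *		... use out[0]/out[1] with out_nents[], then kfree() them ...
 *	}
 */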
/**
* sg_split - split a scatterlist into several scatterlists
* @in: the input sg list
* @in_mapped_nents: the result of a dma_map_sg(in, ...), or 0 if not mapped.
* @skip: the number of bytes to skip in the input sg list
* @nb_splits: the number of desired sg outputs
* @split_sizes: the respective size of each output sg list in bytes
* @out: an array where to store the allocated output sg lists
* @out_mapped_nents: the resulting sg lists mapped number of sg entries. Might
* be NULL if sglist not already mapped (in_mapped_nents = 0)
* @gfp_mask: the allocation flag
*
* This function splits the input sg list into nb_splits sg lists, which are
* allocated and stored into out.
* The @in is split into :
* - @out[0], which covers bytes [@skip .. @skip + @split_sizes[0] - 1] of @in
* - @out[1], which covers bytes [@skip + split_sizes[0] ..
* @skip + @split_sizes[0] + @split_sizes[1] -1]
* etc ...
* It will be the caller's duty to kfree() out array members.
*
* Returns 0 upon success, or error code
*/
int sg_split(struct scatterlist *in, const int in_mapped_nents,
const off_t skip, const int nb_splits,
const size_t *split_sizes,
struct scatterlist **out, int *out_mapped_nents,
gfp_t gfp_mask)
{
int i, ret;
struct sg_splitter *splitters;
splitters = kcalloc(nb_splits, sizeof(*splitters), gfp_mask);
if (!splitters)
return -ENOMEM;
ret = sg_calculate_split(in, sg_nents(in), nb_splits, skip, split_sizes,
splitters, false);
if (ret < 0)
goto err;
ret = -ENOMEM;
for (i = 0; i < nb_splits; i++) {
splitters[i].out_sg = kmalloc_array(splitters[i].nents,
sizeof(struct scatterlist),
gfp_mask);
if (!splitters[i].out_sg)
goto err;
}
/*
* The order of these 3 calls is important and should be kept.
*/
sg_split_phys(splitters, nb_splits);
if (in_mapped_nents) {
ret = sg_calculate_split(in, in_mapped_nents, nb_splits, skip,
split_sizes, splitters, true);
if (ret < 0)
goto err;
sg_split_mapped(splitters, nb_splits);
}
for (i = 0; i < nb_splits; i++) {
out[i] = splitters[i].out_sg;
if (out_mapped_nents)
out_mapped_nents[i] = splitters[i].nents;
}
kfree(splitters);
return 0;
err:
for (i = 0; i < nb_splits; i++)
kfree(splitters[i].out_sg);
kfree(splitters);
return ret;
}
EXPORT_SYMBOL(sg_split); | c | github | https://github.com/torvalds/linux | lib/sg_split.c |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from runner.koan import *
class AboutExceptions(Koan):
class MySpecialError(RuntimeError):
pass
def test_exceptions_inherit_from_exception(self):
mro = self.MySpecialError.mro()
self.assertEqual(__, mro[1].__name__)
self.assertEqual(__, mro[2].__name__)
self.assertEqual(__, mro[3].__name__)
self.assertEqual(__, mro[4].__name__)
def test_try_clause(self):
result = None
try:
self.fail("Oops")
except Exception as ex:
result = 'exception handled'
ex2 = ex
self.assertEqual(__, result)
self.assertEqual(__, isinstance(ex2, Exception))
self.assertEqual(__, isinstance(ex2, RuntimeError))
self.assertTrue(issubclass(RuntimeError, Exception), \
"RuntimeError is a subclass of Exception")
self.assertEqual(__, ex2.args[0])
def test_raising_a_specific_error(self):
result = None
try:
raise self.MySpecialError("My Message")
except self.MySpecialError as ex:
result = 'exception handled'
msg = ex.args[0]
self.assertEqual(__, result)
self.assertEqual(__, msg)
def test_else_clause(self):
result = None
try:
pass
except RuntimeError:
result = 'it broke'
pass
else:
result = 'no damage done'
self.assertEqual(__, result)
def test_finally_clause(self):
result = None
try:
self.fail("Oops")
except:
# no code here
pass
finally:
result = 'always run'
self.assertEqual(__, result) | unknown | codeparrot/codeparrot-clean | ||
"""
A Cobbler representation of an IP network.
Copyright 2009, Red Hat, Inc
John Eckersberg <jeckersb@redhat.com>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
02110-1301 USA
"""
import utils
import item
import time
from cexceptions import *
from utils import _, _IP, _CIDR
FIELDS = [
["name",None,0,"Name",True,"Ex: testlab",0],
["cidr",None,0,"CIDR",True,"CIDR range of this network",0],
["address",None,0,"Address",True,"",0],
["gateway",None,0,"Gateway",True,"",0],
["broadcast",None,0,"Broadcast",True,"",0],
["name_servers",None,0,"Name Servers",True,"",0],
["reserved",None,0,"Reserved",True,"",0],
["used_addresses",None,0,"Used Addresses",False,"",0],
["free_addresses",None,0,"Free Addresses",False,"",0],
["comment","",0,"Comment",True,"Free form text description",0],
["ctime",0,0,"",False,"",0],
["mtime",0,0,"",False,"",0],
["owners","SETTINGS:default_ownership",0,"Owners",True,"Owners list for authz_ownership (space delimited)",0],
["uid",None,0,"",False,"",0],
]
class Network(item.Item):
TYPE_NAME = _("network")
COLLECTION_TYPE = "network"
def make_clone(self):
ds = self.to_datastruct()
cloned = Network(self.config)
cloned.from_datastruct(ds)
return cloned
def get_fields(self):
return FIELDS
def set_cidr(self, cidr):
if self.cidr == None:
self.free_addresses = [_CIDR(cidr)]
self.cidr = _CIDR(cidr)
return True
def set_address(self, address):
if address is None:
# FIXME: probably wrong handling, need to reimpl "is_valid"
# logic for all objects?
self.address = None
return
address = address.strip()
if address == "":
self.address = address
else:
if self.address != None:
self._add_to_free(address)
self.address = _IP(address)
self._remove_from_free(self.address)
return True
def set_gateway(self, gateway):
if gateway is None:
# FIXME
self.gateway = None
return
gateway = gateway.strip()
if gateway == "":
self.gateway = gateway
else:
if self.gateway != None:
self._add_to_free(gateway)
self.gateway = _IP(gateway)
self._remove_from_free(self.gateway)
return True
def set_broadcast(self, broadcast):
if broadcast is None:
# FIXME:
self.broadcast = None
return
broadcast = broadcast.strip()
if broadcast == "":
self.broadcast = broadcast
else:
if self.broadcast != None:
self._add_to_free(broadcast)
self.broadcast = _IP(broadcast)
self._remove_from_free(self.broadcast)
return True
def set_name_servers(self, data):
data = utils.input_string_or_list(data)
self.name_servers = data
return True
def set_reserved(self, reserved):
# FIXME: what should this do?
return True
def set_used_addresses(self, junk):
# FIXME: what should this do? It was missing before
return True
def set_free_addresses(self, junk):
# FIXME: what should this do? It was missing before
return True
def get_assigned_address(self, system, intf):
"""
Get the address in the network assigned to an interface of a system.
"""
try:
return str(self.used_addresses[(system, intf)])
except KeyError:
return None
def subscribe_system(self, system, intf, ip=None):
"""
Join a system to the network. If ip is passed in, try to
claim that specific address, otherwise just grab the first
free address.
"""
if not ip:
if self.free_address_count() == 0:
raise CX(_("Network %s has no free addresses" % self.cidr))
ip = self.free_addresses[0][0]
self._allocate_address(system, intf, ip)
def unsubscribe_system(self, system, intf):
"""
Remove a system from the network. Allocate its address back
into the free pool.
"""
addr = self.get_assigned_address(system, intf)
if not addr:
raise CX(_("Attempting to unsubscribe %s:%s from %s, but not subscribed" % (system, intf, self.name)))
self._remove_from_used(addr)
self._add_to_free(addr)
def _addr_available(self, addr):
"""
Is addr free in the network?
"""
for cidr in self.free_addresses:
if addr in cidr:
return True
return False
def _add_to_free(self, addr, compact=True):
"""
Add addr to the list of free addresses. If compact is True,
then take the list of CIDRs in free_addresses and compact it.
"""
addr = _IP(addr).cidr()
self.free_addresses.append(addr)
if compact:
self.free_addresses = self._compact(self.free_addresses)
def _remove_from_free(self, addr):
"""
Take addr off of the list of free addresses
"""
self.free_addresses = self._subtract_and_flatten(self.free_addresses, [addr])
self.free_addresses.sort()
def _add_to_used(self, system, intf, addr):
"""
Add system,intf with address to used_addresses. Make sure no
entry already exists.
"""
if (system, intf) in self.used_addresses:
# should really throw an error if it's already there
# probably a sign something has gone wrong elsewhere
raise CX(_("Trying to add %s to used_addresses but is already there!" % i))
self.used_addresses[(system,intf)] = addr
def _remove_from_used(self, addr):
"""
Take addr off of the list of used addresses
"""
for k,v in self.used_addresses.iteritems():
if v == addr:
del(self.used_addresses[k])
return
def _allocate_address(self, system, intf, addr):
"""
Try to allocate addr to system on interface intf.
"""
if not self._addr_available(addr):
raise CX(_("Address %s is not available for allocation" % addr))
self._remove_from_free(addr)
self._add_to_used(system, intf, addr)
def _subtract_and_flatten(self, cidr_list, remove_list):
"""
For each item I in remove_list, find the cidr C in cidr_list
that contains I. Perform the subtraction C - I which returns
a new minimal cidr list not containing I. Replace C with this
result, flattened out so we don't get multiple levels of
lists.
"""
for item in remove_list:
for i in range(len(cidr_list)):
if item in cidr_list[i]:
cidr_list += cidr_list[i] - item
del(cidr_list[i])
break
return cidr_list
def _compact(self, cidr_list, sort_first=True):
"""
Compacts a list of CIDR objects down to a minimal-length list L
such that the set of IP addresses contained in L is the same as
the original.
For example:
[10.0.0.0/32, 10.0.0.1/32, 10.0.0.2/32, 10.0.0.3/32]
becomes
[10.0.0.0/30]
"""
if len(cidr_list) <= 1:
return cidr_list
if sort_first:
cidr_list.sort()
did_compact = False
skip_next = False
compacted = []
for i in range(1, len(cidr_list)):
cur = cidr_list[i]
prev = cidr_list[i-1]
if skip_next:
skip_next = False
continue
last = prev[-1]
last += 1
last = last.cidr()
if last == cur[0].cidr() and prev.size() == cur.size():
compacted.append(_CIDR('%s/%d' % (str(prev[0]), prev.prefixlen - 1)))
did_compact = True
skip_next = True
if did_compact:
return self._compact(compacted, sort_first=False)
else:
return cidr_list
def used_address_count(self):
return len(self.used_addresses)
def free_address_count(self):
total = 0
for item in self.free_addresses:
total += len(item)
return total
def get_parent(self):
"""
currently the Cobbler object space does not support subobjects of this object
as it is conceptually not useful.
"""
return None | unknown | codeparrot/codeparrot-clean | ||
"""engine.SCons.Variables.PackageVariable
This file defines the option type for SCons implementing 'package
activation'.
To be used whenever a 'package' may be enabled/disabled and the
package path may be specified.
Usage example:
Examples:
x11=no (disables X11 support)
x11=yes (will search for the package installation dir)
x11=/usr/local/X11 (will check this path for existence)
To replace autoconf's --with-xxx=yyy
opts = Variables()
opts.Add(PackageVariable('x11',
'use X11 installed here (yes = search some places)',
'yes'))
...
if env['x11'] == True:
dir = ... search X11 in some standard places ...
env['x11'] = dir
if env['x11']:
... build with x11 ...
"""
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Variables/PackageVariable.py issue-2856:2676:d23b7a2f45e8 2012/08/05 15:38:28 garyo"
__all__ = ['PackageVariable',]
import SCons.Errors
__enable_strings = ('1', 'yes', 'true', 'on', 'enable', 'search')
__disable_strings = ('0', 'no', 'false', 'off', 'disable')
def _converter(val):
"""
"""
lval = val.lower()
if lval in __enable_strings: return True
if lval in __disable_strings: return False
#raise ValueError("Invalid value for boolean option: %s" % val)
return val
def _validator(key, val, env, searchfunc):
# NB: searchfunc is currently undocumented and unsupported
"""
"""
# todo: write validator, check for path
import os
if env[key] is True:
if searchfunc:
env[key] = searchfunc(key, val)
elif env[key] and not os.path.exists(val):
raise SCons.Errors.UserError(
'Path does not exist for option %s: %s' % (key, val))
def PackageVariable(key, help, default, searchfunc=None):
# NB: searchfunc is currently undocumented and unsupported
"""
The input parameters describe a 'package list' option, thus they
are returned with the correct converter and validator appended. The
result is usable for input to opts.Add() .
A 'package list' option may either be 'all', 'none' or a list of
package names (separated by space).
"""
help = '\n '.join(
(help, '( yes | no | /path/to/%s )' % key))
return (key, help, default,
lambda k, v, e: _validator(k,v,e,searchfunc),
_converter)
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4: | unknown | codeparrot/codeparrot-clean | ||
it("should allow to use top-level-await", () => {
return import("./reexport").then(({ default: value, other }) => {
expect(value).toBe(42);
expect(other).toBe(42);
});
}); | javascript | github | https://github.com/webpack/webpack | test/cases/async-modules/top-level-await/index.js |
import unittest
from nativedroid_server import *
CHUNK_SIZE = 1024 * 1024 # 1MB
def get_file_chunks(filename):
with open(filename, 'rb') as f:
while True:
piece = f.read(CHUNK_SIZE)
if len(piece) == 0:
return
yield LoadBinaryRequest(buffer=piece)
class NativeDroidServerTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.server = grpc.server(futures.ThreadPoolExecutor(max_workers=1))
native_ss_file = pkg_resources.resource_filename('nativedroid.data', 'sourceAndSinks/NativeSourcesAndSinks.txt')
java_ss_file = pkg_resources.resource_filename('nativedroid.data', 'sourceAndSinks/TaintSourcesAndSinks.txt')
add_NativeDroidServicer_to_server(
NativeDroidServer('/tmp/binaries/', 'localhost', 55001, native_ss_file, java_ss_file), cls.server)
cls.server.add_insecure_port('[::]:50001')
cls.server.start()
logger.info('Server started.')
channel = grpc.insecure_channel('localhost:50001')
cls.stub = NativeDroidStub(channel)
file_path = 'testdata/libleak.so'
chunks_generator = get_file_chunks(file_path)
cls._lb_response = cls.stub.LoadBinary(chunks_generator)
@classmethod
def tearDownClass(cls):
cls.server.stop(0)
logger.info('Server stopped.')
cls.server = None
cls.stub = None
path = cls._lb_response.so_digest
if os.path.exists(path):
os.remove(path)
cls._lb_response = None
def testLoadBinary(self):
self.assertEqual(self._lb_response.length, os.path.getsize('testdata/libleak.so'))
def testHasSymbol(self):
response = self.stub.HasSymbol(HasSymbolRequest(so_digest=self._lb_response.so_digest,
symbol='Java_org_arguslab_native_1leak_MainActivity_send'))
self.assertTrue(response.has_symbol)
def testGenSummary(self):
package_pb = JavaPackage(name='org')
package_pb = JavaPackage(name='arguslab', parent=package_pb)
package_pb = JavaPackage(name='native_leak', parent=package_pb)
class_type_pb = ClassType(package=package_pb, name='MainActivity', unknown=False)
java_type_pb = JavaType(class_type=class_type_pb)
package_pb = JavaPackage(name='java')
package_pb = JavaPackage(name='lang', parent=package_pb)
class_type_pb = ClassType(package=package_pb, name='String', unknown=False)
proto = MethodProto(param_types=[JavaType(class_type=class_type_pb)],
return_void_type=VoidType())
method_signature_pb = MethodSignature(owner=java_type_pb, name='send', proto=proto)
request = GenSummaryRequest(apk_digest='', so_digest=self._lb_response.so_digest,
jni_func='Java_org_arguslab_native_1leak_MainActivity_send',
method_signature=method_signature_pb, depth=1)
response = self.stub.GenSummary(request)
self.assertEqual('Lorg/arguslab/native_leak/MainActivity;.send:(Ljava/lang/String;)V -> _SINK_ 1',
response.taint)
if __name__ == '__main__':
unittest.main() | unknown | codeparrot/codeparrot-clean | ||
import threading
import subprocess
import logging
import time
import os
import traceback
import errno
from collections import namedtuple, deque
from itertools import chain
import psutil
import conf
import client
import utils
import rpc.ttypes as ttypes
from profile import SystemProfiler as _SystemProfiler
logger = logging.getLogger(__name__)
__all__ = ['Profiler', 'ResourceMgr', 'ProcessMgr']
_RunningProc = namedtuple("RunningProc", "processCmd pthread cpus")
#
# _ResourceManager
#
class _ResourceManager(object):
"""
The ResourceManager keeps track of the bookable resources on the
machine. This is currently just cores, but may include memory and GPUs
in the future.
"""
def __init__(self):
self.__slots = deque(xrange(Profiler.physicalCpus))
self.__slots_all = tuple(self.__slots)
self.__lock = threading.RLock()
logger.info("Intializing resource manager with %d physical cores.",
Profiler.physicalCpus)
def checkout(self, numCores):
if numCores < 1:
raise ttypes.RndException(1, "Cannot reserve 0 slots")
result = []
with self.__lock:
open_slots = self.__slots
logger.info("Open slots: %s", list(open_slots))
if numCores > len(open_slots):
raise ttypes.RndException(1, "No more open slots")
result = [open_slots.pop() for _ in xrange(numCores)]
logger.info("Checked out CPUS: %s", result)
return result
def checkin(self, cores):
with self.__lock:
self.__slots.extend(cores)
avail, total = len(self.__slots), Profiler.physicalCpus
logger.info("Checked in CPUS: %s; Now available: %d / %d", cores, avail, total)
def getSlots(self):
return list(xrange(Profiler.physicalCpus))
def getOpenSlots(self):
with self.__lock:
return list(self.__slots)
#
# _ProcessManager
#
class _ProcessManager(object):
"""
The ProcessManager keeps track of the running tasks. Each task
is executed in a separate ProcessThread.
"""
SAMPLE_INTERVAL_SEC = 10
def __init__(self):
self.__threads = {}
self.__lock = threading.RLock()
self.__timer = None
self.__isReboot = threading.Event()
self.__isShutdown = threading.Event()
self.__sampler = threading.Thread(target=self._processSampler)
self.__sampler.daemon = True
self.__sampler.start()
self.sendPing(True)
@property
def isReboot(self):
return self.__isReboot.is_set()
def runProcess(self, processCmd, wait=-1):
"""
Takes a RunTaskCommand object, reserves resources,
and starts the process. By default the method returns None.
Optionally, a wait time may be specified in float seconds to wait
until the job has fully started before returning. If wait > -1,
a RunningTask object is returned.
"""
cpus = ResourceMgr.checkout(processCmd.cores)
pthread = _ProcessThread(processCmd, cpus)
with self.__lock:
self.__threads[processCmd.procId] = _RunningProc(processCmd, pthread, cpus)
pthread.start()
logger.info("process thread started")
if wait == -1:
return
task = pthread.getRunningTask(wait)
return task
def processFinished(self, processResult, cpus=None):
"""
Callback for when a process has finished running.
Receives the RunTaskResult object.
Deallocates the resources.
"""
with self.__lock:
if cpus is None:
cpus = self.__threads[processResult.procId].cpus
ResourceMgr.checkin(cpus)
try:
del self.__threads[processResult.procId]
except Exception, e:
logger.warn("Process %s not found: %s", processResult.procId, e)
def sendPing(self, isReboot=False, repeat=True):
"""
Ping into the server with current task and resource states.
If repeat is True, schedules another ping at an interval defined
by the rndaemon config.
"""
if self.__isShutdown.is_set():
repeat = False
# TODO: What is the purpose of the isReboot flag?
# Using the internal flag to determine if we are in a
# reboot state.
isReboot = self.__isReboot.is_set()
tasks = self.getRunningTasks()
Profiler.sendPing(tasks, isReboot)
# TODO: Maybe there needs to be a seperate thread for this check
# but for now it is part of the ping loop.
if isReboot and not tasks:
logger.info("Task queue is empty and daemon is scheduled for reboot")
try:
Profiler.reboot()
except ttypes.RndException, e:
# on next loop, the server will see that the system
# is no longer in isReboot state
logger.warn(e.why)
self.__isReboot.clear()
else:
# just in case
return
if repeat:
self.__timer = threading.Timer(conf.NETWORK_PING_INTERVAL, self.sendPing)
self.__timer.daemon = True
self.__timer.start()
def killRunningTask(self, procId, reason):
"""
Kill a currently running task by its procId.
"""
logger.info("kill requested for procId %s, %s", procId, reason)
with self.__lock:
try:
pthread = self.__threads[procId].pthread
except KeyError:
err = "Process %s not found" % procId
logger.warn(err)
# TODO: Raise a proper exception type? or
# fail quietly?
raise ttypes.RndException(1, err)
_, not_killed = pthread.killProcess(reason=reason)
if not_killed:
err = "Failed to kill the following pids for prodId %s: %s" % \
(procId, ','.join(not_killed))
logger.warn(err)
raise ttypes.RndException(1, err)
def getRunningTasks(self):
""" Get a list of all running task objects """
with self.__lock:
tasks = [t.pthread.getRunningTask() for t in self.__threads.itervalues()]
return tasks
def shutdown(self):
"""
Gracefully shut down all running tasks so they can report back in
"""
logger.debug("Shutdown requested for process manager.")
self.__isShutdown.set()
with self.__lock:
threads = [proc.pthread for proc in self.__threads.itervalues()]
for t in threads:
t.shutdown()
logger.debug("Asked %d tasks to quit and report. Waiting for them to complete", len(threads))
for t in threads:
if not t.wait(10):
logger.warn("Thread failed to close down after waiting 10 seconds: %r", t)
self.__threads.clear()
del threads
logger.debug("Done waiting on task shutdown")
def reboot(self, now=False):
"""
reboot (bool now=False)
Reboot the system as soon as it becomes idle. That is,
when no tasks are running.
If now == True, reboot immediately, regardless of any
in-progress render tasks.
"""
# TODO: For now, assuming that even if they aren't root,
# that they may have permission to reboot. This means a
# reboot(now=False) will not raise an exception to the caller.
#
# if os.geteuid() != 0:
# err = "rndaemon not running as user with permission to reboot system"
# raise ttypes.RndException(1, err)
self.__isReboot.set()
if now:
logger.info("*SYSTEM GOING DOWN FOR IMMEDIATE REBOOT*")
# stop all of the tasks
self.shutdown()
with self.__lock:
if self.__timer:
self.__timer.cancel()
# The reboot could happen from the ping if the task
# queue is empty.
self.sendPing(repeat=False)
# Otherwise, the reboot will happen here, regardless
# of whether there are active tasks running.
Profiler.reboot()
else:
logger.info("*Reboot scheduled at next idle event*")
def _processSampler(self):
"""
Loop that updates metrics on every running process
at intervals.
"""
while not self.__isShutdown.is_set():
with self.__lock:
pthreads = [t.pthread for t in self.__threads.itervalues()]
for pthread in pthreads:
pthread.updateMetrics()
time.sleep(self.SAMPLE_INTERVAL_SEC)
#
# RunningTask
#
class RunningTask(ttypes.RunningTask):
"""
Subclass of ttypes.RunningTask that adjusts the
__repr__ to only print a reduces amount of the last
log line string.
"""
def __repr__(self):
D = self.__dict__.copy()
# elide the log string if its too big
lastLog = D.get('lastLog')
if lastLog and len(lastLog) > 50:
D['lastLog'] = '%s...' % lastLog[:47]
L = ('%s=%r' % (key, value) for key, value in D.iteritems())
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
#
# _ProcessThread
#
class _ProcessThread(threading.Thread):
"""
The _ProcessThread wraps a running task.
"""
_DO_DISK_IO = hasattr(psutil.Process, "get_io_counters")
def __init__(self, rtc, cpus=None):
threading.Thread.__init__(self)
self.daemon = True
self.__logfp = None
self.__cpus = cpus or set()
self.__rtc = rtc
self.__pptr = None
self.__logfp = None
self.__pid = -1
self.__killThread = None
self.__wasKilled = threading.Event()
self.__hasStarted = threading.Event()
self.__isShutdown = threading.Event()
self.__progress = 0.0
self.__lastLog = ""
self.__killReason = ""
self.__metrics = {
'rssMb': 0,
'maxRssMb': 0,
'cpuPercent': 0,
'diskIO': ttypes.DiskIO(-1,-1,-1,-1),
}
def __repr__(self):
return "<%s: (procId: %s, pid: %d)>" % (
self.__class__.__name__,
self.__rtc.procId,
self.__pid)
def shutdown(self):
"""
Instruct the process to shutdown gracefully.
Returns the same output as killProcess()
"""
logger.debug("Shutdown request received. Killing %r", self)
self.__isShutdown.set()
self.killProcess(block=False, reason="rndaemon shutdown request received")
def wait(self, timeout=None):
"""
Waits for the process to finish.
By default, blocks indefinitely. Specify a
timeout in float seconds to wait. If the timeout
value is exceeded, return False
Returns True if the task ended.
"""
self.join(timeout)
return not self.isAlive()
def getRunningTask(self, wait=-1):
"""
getRunningTask(float wait=-1) -> RunningTask
Returns a RunningTask instance representing
the current state of the task.
If wait > 0, then wait that many seconds for
the process to start. This is useful if you are
creating the process and then checking its running
task right away. Some information may not be
available until after the thread has gotten the
process running.
"""
if wait > 0:
self.__hasStarted.wait(wait)
rt = RunningTask()
rtc = self.__rtc
rt.jobId = rtc.jobId
rt.procId = rtc.procId
rt.taskId = rtc.taskId
rt.layerId = rtc.layerId
rt.pid = self.__pid
metrics = self.__metrics
rt.rssMb = metrics['rssMb']
rt.cpuPercent = metrics['cpuPercent']
if self._DO_DISK_IO:
rt.diskIO = metrics['diskIO']
rt.progress = self.__progress
rt.lastLog = self.__lastLog or None
return rt
def run(self):
"""
Run method called implicitely by start()
Fires up the process to do the actual task.
Logs output, and records resource metrics.
"""
rtc = self.__rtc
retcode = 1
try:
uid = self.__rtc.uid
cpus = self.__cpus
logger.info("Opening log file: %s", rtc.logFile)
self.__logfp = utils.ProcessLog(self.__rtc.logFile, uid=uid, buffering=1)
self.__logfp.writeLogHeader(rtc)
env = os.environ.copy()
env.update(rtc.env)
parser = None
if rtc.taskTypes:
parser = utils.ProcessLogParser.fromTaskTypes(rtc.taskTypes)
if not parser.progress:
parser = None
opts = {
'stdout': subprocess.PIPE,
'stderr': subprocess.STDOUT,
'uid': uid,
'cpus': cpus,
'env': env,
}
cmd, opts = Profiler.getSubprocessOpts(rtc.command, **opts)
logger.info("Running command: %s", rtc.command)
self.__logfp.write("[%s] Running process" % time.strftime("%Y-%m-%d %H:%M:%S"))
self.__logfp.flush()
p = subprocess.Popen(cmd, **opts)
self.__pptr = p
self.__pid = p.pid
self.__hasStarted.set()
logger.info("PID: %d", p.pid)
self.updateMetrics()
writeLog = self.__logfp.write
r_pipe = self.__pptr.stdout
for line in iter(r_pipe.readline, ""):
writeLog(line)
self.__lastLog = line
if parser:
prog = parser.parseProgress(line)
if prog is not None:
self.__progress = prog
if self.__isShutdown.is_set():
break
self.__logfp.write("[%s] Process finished" % time.strftime("%Y-%m-%d %H:%M:%S"))
self.__logfp.flush()
try:
retcode = p.wait()
except OSError, e:
if e.errno != errno.ECHILD:
if not self.__isShutdown.is_set():
raise
r_pipe.close()
logger.debug("Return code: %s", retcode)
except Exception, e:
if self.__isShutdown.is_set():
logger.debug("Thread detected shutdown request. Leaving gracefully.")
else:
logger.warn("Failed to execute command: %s", e)
logger.debug(traceback.format_exc())
finally:
self.__completed(retcode)
def updateMetrics(self):
"""
updateMetrics()
Resample information about the currently running
process tree, and update member attributes.
i.e. rss
"""
# logger.debug("updateMetrics(): %r", self)
rss_bytes = 0
cpu_perc = 0
do_disk_io = self._DO_DISK_IO
if do_disk_io:
disk_io = [0,0,0,0]
try:
root_pid = self.__pid
p = psutil.Process(root_pid)
for proc in chain([p], p.get_children(True)):
this_pid = proc.pid
if proc.status == psutil.STATUS_ZOMBIE:
continue
try:
rss_bytes += proc.get_memory_info().rss
except psutil.Error, e:
logger.debug("Error while getting memory data for pid %r: %s", this_pid, e)
try:
cpu_perc += proc.get_cpu_percent(None)
except psutil.Error, e:
logger.debug("Error while getting cpu data for pid %r: %s", this_pid, e)
                if do_disk_io:
                    try:
                        counters = proc.get_io_counters()
                    except psutil.Error, e:
                        logger.debug("Error while getting disk io data for pid %r: %s", this_pid, e)
                    else:
                        for i, val in enumerate(counters):
                            disk_io[i] += val
except psutil.NoSuchProcess, e:
return
cpu_perc_int = int(round(cpu_perc))
rssMb = rss_bytes / 1024 / 1024
metrics = self.__metrics
maxRss = max(rssMb, metrics['maxRssMb'])
disk_io_t = ttypes.DiskIO(*disk_io) if do_disk_io else None
metrics.update({
'rssMb': rssMb,
'maxRssMb': maxRss,
'cpuPercent': cpu_perc_int,
'diskIO': disk_io_t,
})
logger.debug("metrics: %r", metrics)
def killProcess(self, block=True, reason=''):
"""
killProcess(bool block=True, reason='') -> (list killed_pids, list not_killed)
Stop the entire process tree
Returns a tuple of two lists. The first list contains
the pids from the process tree that were successfully
stopped. The second list contains pids that were not
able to be stopped successfully.
By default the call blocks until the attempt to kill
has completed. Set block=False to issue the kill async.
        If the reason for killing the process is passed as a string,
it will be added to the log footer.
"""
self.__killReason = reason
if block:
return self.__killProcess()
# guards against repeat calls to kill while one async
# call is already running
if self.__killThread and self.__killThread.isAlive():
return
t = threading.Thread(target=self.__killProcess)
t.start()
self.__killThread = t
return
def __killProcess(self):
pid = self.__pid
if pid == -1:
return
try:
p = psutil.Process(pid)
except psutil.NoSuchProcess:
return
children = p.get_children(recursive=True)
self.__wasKilled.set()
# kill the top parent
self.__killOneProcess(p)
# make sure each process in the tree is really dead
killed = []
not_killed = []
for child in children:
success = self.__killOneProcess(child)
if success:
killed.append(child.pid)
else:
not_killed.append(child.pid)
return killed, not_killed
def __killOneProcess(self, p):
"""
__killOneProcess(psutil.Process p) -> bool
Try and nicely stop a Process first, then kill it.
Return True if process was killed.
"""
try:
try:
p.wait(0.001)
except psutil.TimeoutExpired:
pass
if not p.is_running():
return True
pid = p.pid
logger.info("Asking nicely for pid %d (%s) to stop", pid, p.name)
p.terminate()
try:
p.wait(5)
except psutil.TimeoutExpired:
pass
if not p.is_running():
return True
logger.info("Killing pid %d (%s)", pid, p.name)
p.kill()
try:
p.wait(1)
except psutil.TimeoutExpired:
pass
if p.is_running():
logger.warn("Failed to properly kill pid %d (taskId: %s)", pid, self.__rtc.taskId)
return False
except psutil.NoSuchProcess:
pass
return True
def __completed(self, retcode):
logger.debug("Process completed: %r, (IsShutdown: %r)", self, self.__isShutdown.is_set())
result = ttypes.RunTaskResult()
result.maxRssMb = self.__metrics['maxRssMb']
result.procId = self.__rtc.procId
result.taskId = self.__rtc.taskId
result.jobId = self.__rtc.jobId
if self.__isShutdown.is_set():
result.exitStatus = 1
result.exitSignal = 86
logger.info("Task closing gracefully from shutdown request")
elif self.__wasKilled.is_set():
result.exitStatus = 1
result.exitSignal = retcode if retcode < 0 else -9
elif retcode < 0:
result.exitStatus = 1
result.exitSignal = retcode
else:
result.exitStatus = retcode
result.exitSignal = 0
logger.info("Process result %s", result)
if not conf.NETWORK_DISABLED:
while True:
try:
service, transport = client.getPlowConnection()
service.taskComplete(result)
transport.close()
break
except Exception, e:
logger.warn("Error talking to plow server, %s, sleeping for 30 seconds", e)
time.sleep(30)
ProcessMgr.processFinished(result, self.__cpus)
if self.__logfp is not None:
attrs = {
'DiskIO': self.__metrics['diskIO'],
'Cpus': len(self.__cpus),
}
if self.__killReason:
attrs['Reason Killed'] = self.__killReason
self.__logfp.writeLogFooterAndClose(result, attrs)
self.__logfp = None
#
# Singleton Instances
#
Profiler = _SystemProfiler()
ResourceMgr = _ResourceManager()
ProcessMgr = _ProcessManager() | unknown | codeparrot/codeparrot-clean | ||
# Copyright 2014 NeuroData (http://neurodata.io)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import sys
import os
import numpy as np
import h5py
import tempfile
import urllib, urllib2
import cStringIO
from PIL import Image
from contextlib import closing
import cv2
sys.path += [os.path.abspath('../../django')]
import OCP.settings
os.environ['DJANGO_SETTINGS_MODULE'] = 'OCP.settings'
from django.conf import settings
import ocpcaproj
import ndlib
import ocpcarest
import ocpcadb
import imagecube
"""Build a Cassandra DB from an existing MySQL DB"""
def main():
parser = argparse.ArgumentParser(description='Build a transform DB for Kwame.')
parser.add_argument('outtoken', action="store", help='Token for the Output project.')
parser.add_argument('path', action="store", help='Path to data')
parser.add_argument('resolution', action="store", type=int)
result = parser.parse_args()
with closing ( ocpcaproj.OCPCAProjectsDB() ) as outprojdb:
outproj = outprojdb.loadProject (result.outtoken)
with closing ( ocpcadb.OCPCADB(outproj) ) as outDB:
# Get the source database sizes
(ximagesz, yimagesz) = outproj.datasetcfg.imagesz [ result.resolution ]
(xcubedim, ycubedim, zcubedim) = cubedims = outproj.datasetcfg.cubedim [ result.resolution ]
(startslice, endslice) = outproj.datasetcfg.slicerange
batchsz = zcubedim
# Get the slices
slices = endslice - startslice + 1
      # Set the limits for iteration on the number of cubes in each dimension
xlimit = (ximagesz-1) / xcubedim + 1
ylimit = (yimagesz-1) / ycubedim + 1
      # Round up the zlimit to the next larger multiple of zcubedim
zlimit = (((slices-1)/zcubedim+1)*zcubedim)/zcubedim
zscale = int(outproj.datasetcfg.zscale[result.resolution])
channel = "Grayscale"
outDB.putChannel(channel,1)
for sl in range( startslice, endslice, batchsz ):
slab = np.zeros ( (batchsz,yimagesz,ximagesz), dtype=np.uint16 )
for b in range (batchsz):
if ( sl + b <= endslice ):
filename = '{}00-164_00-152_{:0>6}.tif'.format(result.path,(sl+b)*80)
#filename = '{}00-111_000-29_{:0>6}.tif'.format(result.path,(sl+b)*50)
#filename = '{}00-199_000000_{:0>6}.tif'.format(result.path,(sl+b)*60)
#filename = '{}00-462_000000_{:0>6}.tif'.format(result.path,(sl+b)*50)
#filename = '{}00-427_000000_{:0>6}.tif'.format(result.path,(sl+b)*60)
#filename = '{}00-222_000000_{:0>6}.tif'.format(result.path,(sl+b)*50)
#filename = '{}00-415_000000_{:0>6}.tif'.format(result.path,(sl+b)*50)
#filename = '{}00-117_000000_{:0>6}.tif'.format(result.path,(sl+b)*50)
#filename = '{}00-298_000000_{:0>6}.tif'.format(result.path,(sl+b)*50)
#filename = '{}00-398_000000_{:0>6}.tif'.format(result.path,(sl+b)*60)
#filename = '{}00-532_000000_{:0>6}.tif'.format(result.path,(sl+b)*60)
#filename = '{}00-199_000000_{:0>6}.tif'.format(result.path,(sl+b)*50)
#filename = '{}00-544_000-53_{:0>6}.tif'.format(result.path,(sl+b)*50)
#imageurl = 'Grayscale/{}/{},{}/{},{}/{}/'.format(result.resolution,0,ximagesz,0,yimagesz,sl+b)
print "slice {}".format(sl+b)
try:
#imgdata = ocpcarest.cutout( imageurl, outproj, outDB )
imgdata = cv2.imread(filename,-1)
              if imgdata is not None:
img = Image.frombuffer( 'I;16', (imgdata.shape[::-1]), imgdata.flatten(), 'raw', 'I;16', 0, 1)
slab[b,:,:] = np.asarray(img.resize( [ximagesz,yimagesz]))
img = None
else:
slab[b,:,:] = np.zeros((yimagesz,ximagesz),dtype=np.uint16)
except IOError, e:
print "Failed to get Cutout. {}".format(e)
for y in range ( 0, yimagesz+1, ycubedim ):
for x in range ( 0, ximagesz+1, xcubedim ):
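            # XYZMorton interleaves the x/y/z cube coordinates into a single
            # Z-order (Morton) index, which is the key used to store and fetch
            # cubes below.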
zidx = ndlib.XYZMorton ( [x/xcubedim,y/ycubedim,(sl-startslice)/zcubedim] )
cubedata = np.zeros ( (zcubedim,ycubedim,xcubedim), dtype=np.uint16 )
xmin = x
ymin = y
xmax = ((min(ximagesz-1,x+xcubedim-1)))+1
ymax = ((min(yimagesz-1,y+ycubedim-1)))+1
zmin = 0
zmax = min(sl+zcubedim,endslice+1)
cubedata[0:zmax-zmin,0:ymax-ymin,0:xmax-xmin] = slab[zmin:zmax, ymin:ymax, xmin:xmax]
cube = imagecube.ImageCube16 ( cubedims )
cube.zeros()
cube.data = cubedata
if np.count_nonzero ( cube.data) != 0:
outDB.putChannelCube ( zidx, 1, result.resolution, cube )
print "Commiting at x:{},y:{},z{}".format(x,y,sl)
outDB.conn.commit()
if __name__ == "__main__":
main() | unknown | codeparrot/codeparrot-clean | ||
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.common.errors;
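/**
 * Indicates that the broker received a record batch whose producer sequence
 * number had already been processed, i.e. a duplicate produce request from an
 * idempotent producer.
 */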
public class DuplicateSequenceException extends ApiException {
public DuplicateSequenceException(String message) {
super(message);
}
} | java | github | https://github.com/apache/kafka | clients/src/main/java/org/apache/kafka/common/errors/DuplicateSequenceException.java |
# Copyright 2017, Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from google.cloud.gapic.vision.v1 import image_annotator_client as iac
from google.cloud.gapic.vision.v1 import enums
from google.cloud.vision.decorators import add_single_feature_methods
from google.cloud.vision.helpers import VisionHelpers
from google.cloud.vision_v1 import types
@add_single_feature_methods
class ImageAnnotatorClient(VisionHelpers, iac.ImageAnnotatorClient):
__doc__ = iac.ImageAnnotatorClient.__doc__
enums = enums
__all__ = (
'enums',
'ImageAnnotatorClient',
'types',
) | unknown | codeparrot/codeparrot-clean | ||
package benchmarks.scheduler
import benchmarks.*
import kotlinx.coroutines.*
import kotlinx.coroutines.channels.*
import org.openjdk.jmh.annotations.*
import java.util.concurrent.*
/*
 * Benchmark which launches multiple async jobs, each with either its own private or a globally shared state.
 * Each job iterates over its state multiple times and suspends after every iteration.
 * The benchmark is intended to show the pros and cons of coroutine affinity (assuming threads are rarely migrated)
 * and to compare against a single thread and a ForkJoinPool.
*
* Benchmark (dispatcher) (jobsCount) Mode Cnt Score Error Units
* StatefulAsyncBenchmark.dependentStateAsync fjp 1 avgt 10 42.147 ± 11.563 us/op
* StatefulAsyncBenchmark.dependentStateAsync fjp 8 avgt 10 111.053 ± 40.097 us/op
* StatefulAsyncBenchmark.dependentStateAsync fjp 16 avgt 10 239.992 ± 52.839 us/op
* StatefulAsyncBenchmark.dependentStateAsync ftp_1 1 avgt 10 32.851 ± 11.385 us/op
* StatefulAsyncBenchmark.dependentStateAsync ftp_1 8 avgt 10 51.692 ± 0.961 us/op
* StatefulAsyncBenchmark.dependentStateAsync ftp_1 16 avgt 10 101.511 ± 3.060 us/op
* StatefulAsyncBenchmark.dependentStateAsync ftp_8 1 avgt 10 31.549 ± 1.014 us/op
* StatefulAsyncBenchmark.dependentStateAsync ftp_8 8 avgt 10 103.990 ± 1.588 us/op
* StatefulAsyncBenchmark.dependentStateAsync ftp_8 16 avgt 10 156.384 ± 2.914 us/op
*
* StatefulAsyncBenchmark.independentStateAsync fjp 1 avgt 10 32.503 ± 0.721 us/op
* StatefulAsyncBenchmark.independentStateAsync fjp 8 avgt 10 73.000 ± 1.686 us/op
* StatefulAsyncBenchmark.independentStateAsync fjp 16 avgt 10 98.629 ± 7.541 us/op
* StatefulAsyncBenchmark.independentStateAsync ftp_1 1 avgt 10 26.111 ± 0.814 us/op
* StatefulAsyncBenchmark.independentStateAsync ftp_1 8 avgt 10 54.644 ± 1.261 us/op
* StatefulAsyncBenchmark.independentStateAsync ftp_1 16 avgt 10 104.871 ± 1.599 us/op
* StatefulAsyncBenchmark.independentStateAsync ftp_8 1 avgt 10 31.929 ± 0.698 us/op
* StatefulAsyncBenchmark.independentStateAsync ftp_8 8 avgt 10 108.959 ± 1.029 us/op
* StatefulAsyncBenchmark.independentStateAsync ftp_8 16 avgt 10 159.593 ± 5.262 us/op
*
*/
@Warmup(iterations = 5, time = 1, timeUnit = TimeUnit.SECONDS)
@Measurement(iterations = 5, time = 1, timeUnit = TimeUnit.SECONDS)
@Fork(value = 2)
@BenchmarkMode(Mode.AverageTime)
@OutputTimeUnit(TimeUnit.MICROSECONDS)
@State(Scope.Benchmark)
@Suppress("DEPRECATION_ERROR")
open class StatefulAsyncBenchmark : ParametrizedDispatcherBase() {
private val stateSize = 2048
private val jobSuspensions = 2 // multiplicative factor for throughput
    // it's useful to have more jobs than cores so the run queue will always be non-empty
@Param("1", "8", "16")
var jobsCount = 1
@Param("fjp", "ftp_1", "dispatcher")
override var dispatcher: String = "fjp"
@Volatile
private var state: Array<LongArray>? = null
@Setup
override fun setup() {
super.setup()
state = Array(Runtime.getRuntime().availableProcessors() * 4) { LongArray(stateSize) { ThreadLocalRandom.current().nextLong() } }
}
@Benchmark
fun independentStateAsync() = runBlocking {
val broadcastChannel = BroadcastChannel<Int>(1)
val subscriptionChannel = Channel<Int>(jobsCount)
val jobs= (0 until jobsCount).map { launchJob(it, broadcastChannel, subscriptionChannel) }.toList()
repeat(jobsCount) {
subscriptionChannel.receive() // await all jobs to start
}
// Fire barrier to start execution
broadcastChannel.send(1)
jobs.forEach { it.await() }
}
@Benchmark
fun dependentStateAsync() = runBlocking {
val broadcastChannel = BroadcastChannel<Int>(1)
val subscriptionChannel = Channel<Int>(jobsCount)
val jobs= (0 until jobsCount).map { launchJob(0, broadcastChannel, subscriptionChannel) }.toList()
repeat(jobsCount) {
subscriptionChannel.receive() // await all jobs to start
}
// Fire barrier to start execution
broadcastChannel.send(1)
jobs.forEach { it.await() }
}
private fun launchJob(
stateNum: Int,
channel: BroadcastChannel<Int>,
subscriptionChannel: Channel<Int>
): Deferred<Long> =
async {
val subscription = channel.openSubscription()
subscriptionChannel.send(1)
subscription.receive()
var sum = 0L
repeat(jobSuspensions) {
val arr = state!![stateNum]
for (i in 0 until stateSize) {
sum += arr[i]
}
yield()
}
sum
}
} | kotlin | github | https://github.com/Kotlin/kotlinx.coroutines | benchmarks/src/jmh/kotlin/benchmarks/scheduler/StatefulAwaitsBenchmark.kt |
package client
import (
"net/http"
"testing"
cerrdefs "github.com/containerd/errdefs"
"github.com/moby/moby/api/types/container"
"gotest.tools/v3/assert"
is "gotest.tools/v3/assert/cmp"
)
func TestContainerUpdateError(t *testing.T) {
client, err := New(WithMockClient(errorMock(http.StatusInternalServerError, "Server error")))
assert.NilError(t, err)
_, err = client.ContainerUpdate(t.Context(), "nothing", ContainerUpdateOptions{})
assert.Check(t, is.ErrorType(err, cerrdefs.IsInternal))
_, err = client.ContainerUpdate(t.Context(), "", ContainerUpdateOptions{})
assert.Check(t, is.ErrorType(err, cerrdefs.IsInvalidArgument))
assert.Check(t, is.ErrorContains(err, "value is empty"))
_, err = client.ContainerUpdate(t.Context(), " ", ContainerUpdateOptions{})
assert.Check(t, is.ErrorType(err, cerrdefs.IsInvalidArgument))
assert.Check(t, is.ErrorContains(err, "value is empty"))
}
func TestContainerUpdate(t *testing.T) {
const expectedURL = "/containers/container_id/update"
client, err := New(WithMockClient(func(req *http.Request) (*http.Response, error) {
if err := assertRequest(req, http.MethodPost, expectedURL); err != nil {
return nil, err
}
return mockJSONResponse(http.StatusOK, nil, container.UpdateResponse{})(req)
}))
assert.NilError(t, err)
_, err = client.ContainerUpdate(t.Context(), "container_id", ContainerUpdateOptions{
Resources: &container.Resources{
CPUPeriod: 1,
},
RestartPolicy: &container.RestartPolicy{
Name: "always",
},
})
assert.NilError(t, err)
} | go | github | https://github.com/moby/moby | client/container_update_test.go |
# Copyright 2012-2015, Damian Johnson and The Tor Project
# See LICENSE for licensing information
import stem.response
import stem.socket
class MapAddressResponse(stem.response.ControlMessage):
"""
Reply for a MAPADDRESS query.
Doesn't raise an exception unless no addresses were mapped successfully.
:var dict entries: mapping between the original and replacement addresses
:raises:
* :class:`stem.OperationFailed` if Tor was unable to satisfy the request
* :class:`stem.InvalidRequest` if the addresses provided were invalid
"""
def _parse_message(self):
# Example:
# 250-127.192.10.10=torproject.org
# 250 1.2.3.4=tor.freehaven.net
if not self.is_ok():
for code, _, message in self.content():
if code == '512':
raise stem.InvalidRequest(code, message)
elif code == '451':
raise stem.OperationFailed(code, message)
else:
raise stem.ProtocolError('MAPADDRESS returned unexpected response code: %s', code)
self.entries = {}
for code, _, message in self.content():
if code == '250':
try:
key, value = message.split('=', 1)
self.entries[key] = value
except ValueError:
raise stem.ProtocolError(None, "MAPADDRESS returned '%s', which isn't a mapping" % message) | unknown | codeparrot/codeparrot-clean | ||
import type { DesignSystem } from '../design-system'
import { ThemeOptions } from '../theme'
import type { ResolvedConfig } from './config/types'
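// Note on `resolveThemeValue` below: it unwraps v3-style theme values. Plain
// strings pass through, string arrays are joined with ", ", and
// `[family, { fontFeatureSettings, ... }]` tuples yield either the family or
// the requested sub-value (see the fontFamily handling further down).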
function resolveThemeValue(value: unknown, subValue: string | null = null): string | null {
if (
Array.isArray(value) &&
value.length === 2 &&
typeof value[1] === 'object' &&
    value[1] !== null
) {
return subValue ? (value[1][subValue] ?? null) : value[0]
} else if (Array.isArray(value) && subValue === null) {
return value.join(', ')
} else if (typeof value === 'string' && subValue === null) {
return value
}
return null
}
export function applyConfigToTheme(
designSystem: DesignSystem,
{ theme }: ResolvedConfig,
replacedThemeKeys: Set<string>,
) {
for (let replacedThemeKey of replacedThemeKeys) {
let name = keyPathToCssProperty([replacedThemeKey])
if (!name) continue
designSystem.theme.clearNamespace(`--${name}`, ThemeOptions.DEFAULT)
}
for (let [path, value] of themeableValues(theme)) {
if (typeof value !== 'string' && typeof value !== 'number') {
continue
}
// Replace `<alpha-value>` with `1`
if (typeof value === 'string') {
value = value.replace(/<alpha-value>/g, '1')
}
// Convert `opacity` namespace from decimal to percentage values
if (path[0] === 'opacity' && (typeof value === 'number' || typeof value === 'string')) {
let numValue = typeof value === 'string' ? parseFloat(value) : value
if (numValue >= 0 && numValue <= 1) {
value = numValue * 100 + '%'
}
}
let name = keyPathToCssProperty(path)
if (!name) continue
designSystem.theme.add(
`--${name}`,
'' + value,
ThemeOptions.INLINE | ThemeOptions.REFERENCE | ThemeOptions.DEFAULT,
)
}
// If someone has updated `fontFamily.sans` or `fontFamily.mono` in a JS
// config, we need to make sure variables like `--default-font-family` and
// `--default-font-feature-settings` are updated to match those explicit
// values, because variables like `--font-family-sans` and
// `--font-family-sans--feature-settings` (which the `--default-font-*`
// variables reference) won't exist in the generated CSS.
if (Object.hasOwn(theme, 'fontFamily')) {
let options = ThemeOptions.INLINE | ThemeOptions.DEFAULT
// Replace `--default-font-*` with `fontFamily.sans` values
{
let fontFamily = resolveThemeValue(theme.fontFamily.sans)
if (fontFamily && designSystem.theme.hasDefault('--font-sans')) {
designSystem.theme.add('--default-font-family', fontFamily, options)
designSystem.theme.add(
'--default-font-feature-settings',
resolveThemeValue(theme.fontFamily.sans, 'fontFeatureSettings') ?? 'normal',
options,
)
designSystem.theme.add(
'--default-font-variation-settings',
resolveThemeValue(theme.fontFamily.sans, 'fontVariationSettings') ?? 'normal',
options,
)
}
}
// Replace `--default-mono-font-*` with `fontFamily.mono` values
{
let fontFamily = resolveThemeValue(theme.fontFamily.mono)
if (fontFamily && designSystem.theme.hasDefault('--font-mono')) {
designSystem.theme.add('--default-mono-font-family', fontFamily, options)
designSystem.theme.add(
'--default-mono-font-feature-settings',
resolveThemeValue(theme.fontFamily.mono, 'fontFeatureSettings') ?? 'normal',
options,
)
designSystem.theme.add(
'--default-mono-font-variation-settings',
resolveThemeValue(theme.fontFamily.mono, 'fontVariationSettings') ?? 'normal',
options,
)
}
}
}
return theme
}
export function themeableValues(config: ResolvedConfig['theme']): [string[], unknown][] {
let toAdd: [string[], unknown][] = []
walk(config as any, [], (value, path) => {
if (isValidThemePrimitive(value)) {
toAdd.push([path, value])
return WalkAction.Skip
}
if (isValidThemeTuple(value)) {
toAdd.push([path, value[0]])
for (let key of Reflect.ownKeys(value[1]) as string[]) {
toAdd.push([[...path, `-${key}`], value[1][key]])
}
return WalkAction.Skip
}
if (Array.isArray(value) && value.every((v) => typeof v === 'string')) {
if (path[0] === 'fontSize') {
toAdd.push([path, value[0]])
if (value.length >= 2) {
toAdd.push([[...path, '-line-height'], value[1]])
}
} else {
toAdd.push([path, value.join(', ')])
}
return WalkAction.Skip
}
})
return toAdd
}
const SPECIAL_DEFAULT_KEYS: Record<string, string> = {
borderWidth: 'border-width',
outlineWidth: 'outline-width',
ringColor: 'ring-color',
ringWidth: 'ring-width',
transitionDuration: 'transition-duration',
transitionTimingFunction: 'transition-timing-function',
}
const OLD_TO_NEW_NAMESPACE: Record<string, string> = {
animation: 'animate',
aspectRatio: 'aspect',
borderRadius: 'radius',
boxShadow: 'shadow',
colors: 'color',
containers: 'container',
fontFamily: 'font',
fontSize: 'text',
letterSpacing: 'tracking',
lineHeight: 'leading',
maxWidth: 'container',
screens: 'breakpoint',
transitionTimingFunction: 'ease',
}
const IS_VALID_KEY = /^[a-zA-Z0-9-_%/\.]+$/
export function keyPathToCssProperty(path: string[]) {
// In some special cases the `DEFAULT` key did not map to a "default" utility
// e.g. `ringColor.DEFAULT` wasn't *just* used for `ring`. It was used for
// all ring utilities as the color when one wasn't specified.
//
// We place these specialty values under the `--default-*` namespace to signal
// that they are defaults used by (potentially) multiple utilities.
let specialDefault = SPECIAL_DEFAULT_KEYS[path[0]]
if (specialDefault && path[1] === 'DEFAULT') return `default-${specialDefault}`
// The legacy container component config should not be included in the Theme
if (path[0] === 'container') return null
for (let part of path) {
if (!IS_VALID_KEY.test(part)) return null
}
// Map old v3 namespaces to new theme namespaces
let ns = OLD_TO_NEW_NAMESPACE[path[0]]
if (ns) {
path = path.slice()
path[0] = ns
}
return (
path
// [1] should move into the nested object tuple. To create the CSS variable
// name for this, we replace it with an empty string that will result in two
// subsequent dashes when joined.
//
// E.g.:
// - `fontSize.xs.1.lineHeight` -> `font-size-xs--line-height`
// - `spacing.1` -> `--spacing-1`
.map((path, idx, all) => (path === '1' && idx !== all.length - 1 ? '' : path))
// Resolve the key path to a CSS variable segment
.map((part, idx) => {
part = part.replaceAll('.', '_')
let shouldConvert =
// The first "namespace" part should be converted to kebab-case
// This converts things like backgroundColor to `background-color`
idx === 0 ||
// Any tuple nested key should be converted to kebab-case
// These are identified with a leading `-`
// e.g. `fontSize.xs.1.lineHeight` -> `font-size-xs--line-height`
part.startsWith('-') ||
// `lineHeight` is a bit of a special case in which it does not
// always begin with a leading `-` even when as a nested tuple key
part === 'lineHeight'
if (shouldConvert) {
part = part.replace(/([a-z])([A-Z])/g, (_, a, b) => `${a}-${b.toLowerCase()}`)
}
return part
})
// Remove the `DEFAULT` key at the end of a path
// We're reading from CSS anyway so it'll be a string
.filter((part, index) => part !== 'DEFAULT' || index !== path.length - 1)
.join('-')
)
}
function isValidThemePrimitive(value: unknown) {
return typeof value === 'number' || typeof value === 'string'
}
function isValidThemeTuple(value: unknown): value is [string, Record<string, string | number>] {
// Check for tuple values of the form
// `[string, Record<string, string | number>]`
if (!Array.isArray(value)) return false
if (value.length !== 2) return false
// A string or number as the "value"
if (typeof value[0] !== 'string' && typeof value[0] !== 'number') return false
// An object as the nested theme values
if (value[1] === undefined || value[1] === null) return false
if (typeof value[1] !== 'object') return false
for (let key of Reflect.ownKeys(value[1])) {
if (typeof key !== 'string') return false
if (typeof value[1][key] !== 'string' && typeof value[1][key] !== 'number') return false
}
return true
}
const enum WalkAction {
/** Continue walking, which is the default */
Continue,
/** Skip visiting the children of this node */
Skip,
/** Stop the walk entirely */
Stop,
}
function walk(
obj: Record<string, unknown>,
path: string[] = [],
callback: (value: unknown, path: string[]) => WalkAction | void,
) {
for (let key of Reflect.ownKeys(obj) as string[]) {
let value = obj[key]
if (value === undefined || value === null) {
continue
}
let keyPath = [...path, key]
let result = callback(value, keyPath) ?? WalkAction.Continue
if (result === WalkAction.Skip) continue
if (result === WalkAction.Stop) return WalkAction.Stop
if (!Array.isArray(value) && typeof value !== 'object') continue
if (walk(value as any, keyPath, callback) === WalkAction.Stop) {
return WalkAction.Stop
}
}
} | typescript | github | https://github.com/tailwindlabs/tailwindcss | packages/tailwindcss/src/compat/apply-config-to-theme.ts |
import logging
import logging.handlers
import os
def convert_log_level(level=26):
"""
Get a numeric log level from a string. The default 26 is for SHORT logs.
:param level
:return level
"""
# annoying but the level can be passed in as None
if not level:
level = 26
levels = {'notset': 0, 'debug': 10, 'info': 20, 'minimal': 22,
'short': 26, 'warning': 30, 'error': 40, 'critical': 50}
if isinstance(level, str):
level = levels.get(level)
return level
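# For example, given the mapping above:
#   convert_log_level('debug') -> 10
#   convert_log_level('short') -> 26
#   convert_log_level()        -> 26  (the custom SHORT level registered in
#                                      start_logging below)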
def get_log_file(filename=None):
# make sure the log directory exists and place the log file there
if filename is None:
filename = os.path.join(
os.path.expanduser('~'),
'.tvrenamr',
'tvrenamr.log'
)
filename = filename.replace('~', os.path.expanduser('~'))
try:
os.makedirs(os.path.split(filename)[0])
except OSError:
pass
return filename
def start_logging(filename, log_level, quiet=False):
"""
Setup the file logging and start the root logger
"""
filename = get_log_file(filename)
log_level = convert_log_level(log_level)
# add the custom levels
logging.addLevelName(22, 'MINIMAL')
logging.addLevelName(26, 'SHORT')
# setup log file
file_format = '%(asctime)-15s %(levelname)-8s %(name)-11s %(message)s'
handler = logging.handlers.RotatingFileHandler(filename, maxBytes=1048576, backupCount=10)
handler.setFormatter(logging.Formatter(file_format, '%Y-%m-%dT%H:%M'))
logging.getLogger().addHandler(handler)
logging.getLogger().setLevel(logging.DEBUG)
if not quiet:
# setup the console logs to debug
# debug
        if log_level == 10:
console_format = '%(asctime)-15s %(levelname)-8s %(name)-11s %(message)s'
console_datefmt = '%Y-%m-%d %H:%M'
else:
console_format = '%(message)s'
console_datefmt = ''
console_formatter = logging.Formatter(console_format, console_datefmt)
# define a Handler with the given level and outputs to the console
console = logging.StreamHandler()
console.setLevel(log_level)
# set the console format & attach the handler to the root logger with it.
console.setFormatter(console_formatter)
logging.getLogger().addHandler(console) | unknown | codeparrot/codeparrot-clean | ||
# coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Network architecture."""
import tensorflow as tf
from cola import constants
class DotProduct(tf.keras.layers.Layer):
"""Normalized dot product."""
def call(self, anchor, positive):
anchor = tf.nn.l2_normalize(anchor, axis=-1)
positive = tf.nn.l2_normalize(positive, axis=-1)
return tf.linalg.matmul(anchor, positive, transpose_b=True)
class BilinearProduct(tf.keras.layers.Layer):
"""Bilinear product."""
def __init__(self, dim):
super().__init__()
self._dim = dim
def build(self, _):
self._w = self.add_weight(
shape=(self._dim, self._dim),
initializer="random_normal",
trainable=True,
name="bilinear_product_weight",
)
def call(self, anchor, positive):
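    # Bilinear similarity: sim(a, p) = a * W * p^T, where W is a learned
    # (dim x dim) matrix created in build().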
projection_positive = tf.linalg.matmul(self._w, positive, transpose_b=True)
return tf.linalg.matmul(anchor, projection_positive)
class ContrastiveModel(tf.keras.Model):
"""Wrapper class for custom contrastive model."""
def __init__(self, embedding_model, temperature, similarity_layer,
similarity_type):
super().__init__()
self.embedding_model = embedding_model
self._temperature = temperature
self._similarity_layer = similarity_layer
self._similarity_type = similarity_type
def train_step(self, data):
anchors, positives = data
with tf.GradientTape() as tape:
inputs = tf.concat([anchors, positives], axis=0)
embeddings = self.embedding_model(inputs, training=True)
anchor_embeddings, positive_embeddings = tf.split(embeddings, 2, axis=0)
# logits
similarities = self._similarity_layer(anchor_embeddings,
positive_embeddings)
if self._similarity_type == constants.SimilarityMeasure.DOT:
similarities /= self._temperature
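      # With in-batch negatives, anchor i's positive sits at column i of the
      # similarity matrix, so the target label for row i is simply i.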
sparse_labels = tf.range(tf.shape(anchors)[0])
loss = self.compiled_loss(sparse_labels, similarities)
loss += sum(self.losses)
trainable_vars = self.trainable_variables
gradients = tape.gradient(loss, trainable_vars)
self.optimizer.apply_gradients(zip(gradients, trainable_vars))
self.compiled_metrics.update_state(sparse_labels, similarities)
return {m.name: m.result() for m in self.metrics}
def get_efficient_net_encoder(input_shape, pooling):
"""Wrapper function for efficient net B0."""
efficient_net = tf.keras.applications.EfficientNetB0(
include_top=False, weights=None, input_shape=input_shape, pooling=pooling)
  # Set the name `encoder`, as the supervised module looks it up by name
  # when setting its `trainable` value.
return tf.keras.Model(
efficient_net.inputs, efficient_net.outputs, name="encoder")
def get_contrastive_network(embedding_dim,
temperature,
pooling_type="max",
similarity_type=constants.SimilarityMeasure.DOT,
input_shape=(None, 64, 1)):
"""Creates a model for contrastive learning task."""
inputs = tf.keras.layers.Input(input_shape)
encoder = get_efficient_net_encoder(input_shape, pooling_type)
x = encoder(inputs)
outputs = tf.keras.layers.Dense(embedding_dim, activation="linear")(x)
if similarity_type == constants.SimilarityMeasure.BILINEAR:
outputs = tf.keras.layers.LayerNormalization()(outputs)
outputs = tf.keras.layers.Activation("tanh")(outputs)
embedding_model = tf.keras.Model(inputs, outputs)
if similarity_type == constants.SimilarityMeasure.BILINEAR:
embedding_dim = embedding_model.output.shape[-1]
similarity_layer = BilinearProduct(embedding_dim)
else:
similarity_layer = DotProduct()
return ContrastiveModel(embedding_model, temperature, similarity_layer,
similarity_type) | unknown | codeparrot/codeparrot-clean | ||
from datetime import datetime
from src.PTTparser import PTTparser
from src.DBmanage import DBmanage
class PTTcrawler:
def __init__(self):
self.db = DBmanage()
self.pttParser = PTTparser()
def crawlHotBoards(self):
hotBoardList = self.pttParser.parseHotBoard()
for board in hotBoardList:
self.crawlBoard(board)
self.crawlArticlesInBoard(board)
def crawlBoard(self, boardName):
pagesToBeCrawl = 10
print('Crawling board...', 'boardname:', boardName)
parseBoardResult = self.pttParser.parseBoard(boardName, pagesToBeCrawl)
crawlResult = {
'boardName': boardName,
'crawlPages': parseBoardResult,
'crawlPagesCount': len(parseBoardResult),
'timeStamp': datetime.now().strftime('%Y-%m-%d %H:%M:%S')
}
self.db.saveCrawledBoardResult(crawlResult)
def crawlArticlesInBoard(self, boardName):
print('Crawling articles in board...', 'boardname:', boardName)
latestBoardResultPath = self.db.getLatestBoardResultPath(boardName)
boardResult = self.db.loadCrawledBoardResult(latestBoardResultPath)
print('load boardResult from', latestBoardResultPath)
articleInfoList = self.getArticleInfoList(boardResult)
allArticle = self.getAllArticle(boardName, articleInfoList)
crawlResult = {
'boardName': boardName,
'crawlArticles': allArticle,
'crawlArticlesCount': len(allArticle),
'timeStamp': datetime.now().strftime('%Y-%m-%d %H:%M:%S')
}
self.db.saveCrawledArticleResult(crawlResult)
def getArticleInfoList(self, boardResult):
articleInfoList = []
for page in boardResult['crawlPages']:
articleInfoList += page['articleList']
return articleInfoList
def getAllArticle(self, boardName, articleInfoList):
allArticle = []
for articleInfo in articleInfoList:
articleID = articleInfo['articleID']
print('Board:', boardName, '\t', 'Article ID:', articleID)
try:
article = self.pttParser.parseArticle(boardName, articleID)
except Exception as e:
print('Page Not Found')
else:
allArticle.append(article)
print('Parsed successfully.')
finally:
print()
return allArticle | unknown | codeparrot/codeparrot-clean | ||
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
import ast
from airflow.utils.dag_version_inflation_checker import (
AirflowRuntimeVaryingValueChecker,
DagTaskDetector,
RuntimeVaryingValueAnalyzer,
RuntimeVaryingValueWarning,
WarningContext,
)
class TestRuntimeVaryingValueAnalyzer:
def setup_method(self):
"""Each test gets a fresh analyzer instance."""
self.varying_vars = {}
self.imports = {}
self.from_imports = {}
self.analyzer = RuntimeVaryingValueAnalyzer(self.varying_vars, self.imports, self.from_imports)
def test_is_runtime_varying_attribute_call__detects_datetime_now(self):
"""datetime.now() should be recognized as runtime-varying."""
code = "datetime.now()"
call_node = ast.parse(code, mode="eval").body
self.imports["datetime"] = "datetime"
# The func is an Attribute node: datetime.now
assert isinstance(call_node.func, ast.Attribute)
result = self.analyzer.is_runtime_varying_attribute_call(call_node.func)
assert result is True
def test_is_runtime_varying_attribute_call__ignores_static_method(self):
"""Static methods like str.upper() should NOT be detected."""
code = "str.upper('hello')"
call_node = ast.parse(code, mode="eval").body
assert isinstance(call_node.func, ast.Attribute)
result = self.analyzer.is_runtime_varying_attribute_call(call_node.func)
assert result is False
def test_is_runtime_varying_attribute_call__handles_aliased_imports(self):
"""
Should detect runtime-varying calls even with import aliases.
Example: import datetime as dt; dt.now()
"""
code = "dt.now()"
call_node = ast.parse(code, mode="eval").body
self.imports["dt"] = "datetime" # dt is alias for datetime
assert isinstance(call_node.func, ast.Attribute)
result = self.analyzer.is_runtime_varying_attribute_call(call_node.func)
assert result is True
def test_is_runtime_varying_name_call__detects_uuid4(self):
"""Detect uuid4() when imported as "from uuid import uuid4."""
code = "uuid4()"
call_node = ast.parse(code, mode="eval").body
self.from_imports["uuid4"] = ("uuid", "uuid4")
assert isinstance(call_node.func, ast.Name)
result = self.analyzer.is_runtime_varying_name_call(call_node.func)
assert result is True
def test_is_runtime_varying_name_call__ignores_regular_function(self):
code = "my_function()"
call_node = ast.parse(code, mode="eval").body
assert isinstance(call_node.func, ast.Name)
result = self.analyzer.is_runtime_varying_name_call(call_node.func)
assert result is False
def test_has_varying_arguments__detects_varying_positional_arg(self):
"""
Detect when a positional argument is runtime-varying.
Example: print(datetime.now())
"""
code = "print(datetime.now())"
call_node = ast.parse(code, mode="eval").body
self.imports["datetime"] = "datetime"
result = self.analyzer.has_varying_arguments(call_node)
assert result is True
def test_has_varying_arguments__detects_varying_keyword_arg(self):
"""
Detect when a keyword argument is runtime-varying.
Example: func(param=random.randint(1, 10))
"""
code = "func(param=func1(random.randint(1, 10)))"
call_node = ast.parse(code, mode="eval").body
self.imports["random"] = "random"
result = self.analyzer.has_varying_arguments(call_node)
assert result is True
def test_has_varying_arguments__returns_false_for_static_args(self):
"""
Static arguments should return False.
Example: print("hello", 123)
"""
code = 'print("hello", 123)'
call_node = ast.parse(code, mode="eval").body
result = self.analyzer.has_varying_arguments(call_node)
assert result is False
def test_is_runtime_varying_call__true_when_function_itself_varies(self):
"""
Return True when the function call itself is runtime-varying.
Example: datetime.now() - the function is the varying part
"""
code = "datetime.now()"
call_node = ast.parse(code, mode="eval").body
self.imports["datetime"] = "datetime"
result = self.analyzer.is_runtime_varying_call(call_node)
assert result is True
def test_is_runtime_varying_call__true_when_argument_varies(self):
"""
Return True when arguments contain runtime-varying values.
Example: print(datetime.now()) - print is static but arg varies
"""
code = "print(datetime.now())"
call_node = ast.parse(code, mode="eval").body
self.imports["datetime"] = "datetime"
result = self.analyzer.is_runtime_varying_call(call_node)
assert result is True
def test_is_runtime_varying_call__false_when_completely_static(self):
"""Return False when both function and arguments are static."""
code = 'print("hello")'
call_node = ast.parse(code, mode="eval").body
result = self.analyzer.is_runtime_varying_call(call_node)
assert result is False
def test_get_varying_source__detects_direct_call(self):
"""Detect direct runtime-varying function calls."""
code = "datetime.now()"
node = ast.parse(code, mode="eval").body
self.imports["datetime"] = "datetime"
result = self.analyzer.get_varying_source(node)
assert result == "datetime.now()"
def test_get_varying_source__detects_variable_reference(self):
"""
Detect when a variable holds a runtime-varying value.
        Example: current_time = datetime.now(); later reads of current_time count as varying.
"""
code = "current_time"
node = ast.parse(code, mode="eval").body
self.varying_vars["current_time"] = (10, "datetime.now()")
result = self.analyzer.get_varying_source(node)
assert result == "datetime.now()"
def test_get_varying_source__detects_in_fstring(self):
"""
Detect runtime-varying values embedded in f-strings.
Example: f"dag_{datetime.now()}"
"""
code = 'f"dag_{datetime.now()}"'
node = ast.parse(code, mode="eval").body
self.imports["datetime"] = "datetime"
result = self.analyzer.get_varying_source(node)
assert result == "datetime.now()"
def test_get_varying_source__detects_in_list(self):
"""
Detect runtime-varying values inside list literals.
Example: [1, 2, datetime.now()]
"""
code = "[1, 2, datetime.now()]"
node = ast.parse(code, mode="eval").body
self.imports["datetime"] = "datetime"
result = self.analyzer.get_varying_source(node)
assert result == "datetime.now()"
def test_get_varying_source__detects_in_dict_value(self):
"""
Detect runtime-varying values in dictionary values.
Example: {"key": datetime.now()}
"""
code = '{"key": datetime.now()}'
node = ast.parse(code, mode="eval").body
self.imports["datetime"] = "datetime"
result = self.analyzer.get_varying_source(node)
assert result == "datetime.now()"
def test_get_varying_source__detects_in_binary_operation(self):
"""
Detect runtime-varying values in binary operations.
Example: "prefix_" + str(datetime.now())
"""
code = '"prefix_" + str(datetime.now())'
node = ast.parse(code, mode="eval").body
self.imports["datetime"] = "datetime"
result = self.analyzer.get_varying_source(node)
assert result is not None
assert "datetime.now()" in result
def test_get_varying_source__returns_none_for_static_values(self):
"""
Return None for completely static values.
Example: "static_string", 123, [1, 2, 3]
"""
static_values = [
'"static_string"',
"123",
"[1, 2, 3]",
'{"key": "value"}',
]
for code in static_values:
node = ast.parse(code, mode="eval").body
result = self.analyzer.get_varying_source(node)
assert result is None, f"Expected None for static value: {code}"
class TestDAGTaskDetector:
def setup_method(self):
"""Each test gets a fresh detector instance"""
self.from_imports = {}
self.detector = DagTaskDetector(self.from_imports)
def test_is_dag_constructor__detects_traditional_dag_call_uppercase(self):
"""
Detect uppercase DAG() when imported.
Usage: dag = DAG(dag_id="my_dag")
"""
code = 'DAG(dag_id="my_dag")'
call_node = ast.parse(code, mode="eval").body
self.from_imports["DAG"] = ("airflow", "DAG")
result = self.detector.is_dag_constructor(call_node)
assert result is True
def test_is_dag_constructor__detects_dag_generated_by_decorator(self):
"""
Detect Dag generated by decorator.
Usage: @dag(dag_id="my_dag")
"""
code = 'dag(dag_id="my_dag")'
call_node = ast.parse(code, mode="eval").body
self.from_imports["dag"] = ("airflow.decorators", "dag")
result = self.detector.is_dag_constructor(call_node)
assert result is True
def test_is_dag_constructor__ignores_non_dag_functions(self):
"""Regular function calls should not be detected as Dag constructors."""
code = "my_function()"
call_node = ast.parse(code, mode="eval").body
result = self.detector.is_dag_constructor(call_node)
assert result is False
def test_is_task_constructor__true_when_inside_dag_context(self):
"""
Any function call inside a Dag with-block is considered a task.
Example:
with DAG() as dag:
PythonOperator() # <- This is a task
"""
code = "PythonOperator(task_id='my_task')"
call_node = ast.parse(code, mode="eval").body
self.detector.enter_dag_context()
result = self.detector.is_task_constructor(call_node)
assert result is True
def test_is_task_constructor__false_when_outside_dag_context(self):
"""Same call outside Dag context is NOT automatically a task."""
code = "PythonOperator(task_id='my_task')"
call_node = ast.parse(code, mode="eval").body
result = self.detector.is_task_constructor(call_node)
assert result is False
def test_is_task_constructor__true_when_dag_passed_as_argument(self):
"""
Detect task when dag= parameter references a Dag instance.
        Example: my_dag = DAG(dag_id='dag'); task = PythonOperator(dag=my_dag)
"""
code = "PythonOperator(task_id='task', dag=my_dag)"
call_node = ast.parse(code, mode="eval").body
self.detector.register_dag_instance("my_dag")
result = self.detector.is_task_constructor(call_node)
assert result is True
def test_is_task_constructor__true_when_dag_in_positional_args(self):
"""
Detect task even when Dag is passed as positional argument.
        Example: my_dag = DAG(dag_id='dag'); task = PythonOperator('task_id', my_dag)
"""
code = "PythonOperator('task_id', my_dag)"
call_node = ast.parse(code, mode="eval").body
self.detector.register_dag_instance("my_dag")
result = self.detector.is_task_constructor(call_node)
assert result is True
def test_enter_and_exit_dag_context(self):
"""Properly track entering and exiting Dag with-blocks."""
assert self.detector.is_in_dag_context is False
self.detector.enter_dag_context()
assert self.detector.is_in_dag_context is True
self.detector.exit_dag_context()
assert self.detector.is_in_dag_context is False
def test_register_dag_instance(self):
"""Remember variable names that hold Dag instances."""
assert "my_dag" not in self.detector.dag_instances
self.detector.register_dag_instance("my_dag")
assert "my_dag" in self.detector.dag_instances
class TestAirflowRuntimeVaryingValueChecker:
"""Tests for AirflowRuntimeVaryingValueChecker (Main Visitor)."""
def setup_method(self):
"""Each test gets a fresh checker instance"""
self.checker = AirflowRuntimeVaryingValueChecker()
def test_visit_import__tracks_simple_import(self):
"""Remember simple imports like 'import datetime'."""
code = "import datetime"
tree = ast.parse(code)
self.checker.visit(tree)
assert "datetime" in self.checker.imports
assert self.checker.imports["datetime"] == "datetime"
def test_visit_import__tracks_aliased_import(self):
"""Remember import aliases like 'import datetime as dt'."""
code = "import datetime as dt"
tree = ast.parse(code)
self.checker.visit(tree)
assert "dt" in self.checker.imports
assert self.checker.imports["dt"] == "datetime"
def test_visit_importfrom__tracks_from_import(self):
"""Remember 'from X import Y' style imports."""
code = "from datetime import now"
tree = ast.parse(code)
self.checker.visit(tree)
assert "now" in self.checker.from_imports
assert self.checker.from_imports["now"] == ("datetime", "now")
def test_visit_importfrom__tracks_aliased_from_import(self):
"""Remember aliases in from imports."""
code = "from datetime import now as current_time"
tree = ast.parse(code)
self.checker.visit(tree)
assert "current_time" in self.checker.from_imports
assert self.checker.from_imports["current_time"] == ("datetime", "now")
def test_visit_assign__registers_dag_instance(self):
"""When assigning DAG(), remember the variable name."""
code = """
from airflow import DAG
my_dag = DAG(dag_id="test")
"""
tree = ast.parse(code)
self.checker.visit(tree)
assert "my_dag" in self.checker.dag_detector.dag_instances
def test_visit_assign__tracks_varying_variable(self):
"""When assigning a runtime-varying value, track the variable."""
code = """
from datetime import datetime
current_time = datetime.now()
"""
tree = ast.parse(code)
self.checker.visit(tree)
assert "current_time" in self.checker.varying_vars
line, source = self.checker.varying_vars["current_time"]
assert "datetime.now()" in source
def test_visit_assign__warns_on_dag_with_varying_value(self):
"""Warn when Dag constructor uses runtime-varying values."""
code = """
from airflow import DAG
from datetime import datetime
dag = DAG(dag_id=f"dag_{datetime.now()}")
"""
tree = ast.parse(code)
self.checker.visit(tree)
assert len(self.checker.static_check_result.warnings) == 1
assert any("Dag constructor" in w.message for w in self.checker.static_check_result.warnings)
def test_visit_call__detects_task_in_dag_context(self):
"""Detect task creation inside Dag with block."""
code = """
from airflow import DAG
from airflow.operators.python import PythonOperator
from datetime import datetime
with DAG(dag_id="test") as dag:
task = PythonOperator(task_id=f"task_{datetime.now()}") # !problem
"""
tree = ast.parse(code)
self.checker.visit(tree)
assert len(self.checker.static_check_result.warnings) == 1
assert any("PythonOperator" in w.code for w in self.checker.static_check_result.warnings)
def test_visit_for__warns_on_varying_range(self):
"""Warn when for-loop range is runtime-varying."""
code = """
from airflow import DAG
from airflow.operators.bash import BashOperator
from datetime import datetime
with DAG(
dag_id=dag_id,
schedule_interval='@daily',
) as dag:
for i in [datetime.now(), "3"]:
task = BashOperator(
task_id='print_bash_hello_{i}',
bash_command=f'echo "Hello from DAG {i}!"', # !problem
dag=dag,
)
"""
tree = ast.parse(code)
self.checker.visit(tree)
warnings = self.checker.static_check_result.warnings
assert len(warnings) == 1
assert any("BashOperator" in w.code for w in warnings)
def test_check_and_warn__creates_warning_for_varying_arg(self):
"""Create a warning when detecting varying positional argument."""
code = 'DAG(f"dag_{datetime.now()}")'
call_node = ast.parse(code, mode="eval").body
self.checker.from_imports["DAG"] = ("airflow", "DAG")
self.checker.imports["datetime"] = "datetime"
self.checker._check_and_warn(call_node, WarningContext.DAG_CONSTRUCTOR)
assert len(self.checker.static_check_result.warnings) == 1
warning = self.checker.static_check_result.warnings[0]
assert WarningContext.DAG_CONSTRUCTOR.value in warning.message
assert "datetime.now()" in warning.code
def test_check_and_warn__creates_warning_for_varying_kwarg(self):
"""Create a warning when detecting varying keyword argument"""
code = "DAG(dag_id=datetime.now())"
call_node = ast.parse(code, mode="eval").body
self.checker.from_imports["DAG"] = ("airflow", "DAG")
self.checker.imports["datetime"] = "datetime"
self.checker._check_and_warn(call_node, WarningContext.TASK_CONSTRUCTOR)
assert len(self.checker.static_check_result.warnings) == 1
warning = self.checker.static_check_result.warnings[0]
assert "dag_id" in warning.code
assert "datetime.now()" in warning.code
class TestIntegrationScenarios:
"""
Integration tests showing real-world Airflow patterns.
Demonstrate actual use cases and why they're problematic.
"""
def _check_code(self, code: str) -> list[RuntimeVaryingValueWarning]:
"""Helper to parse and check code"""
tree = ast.parse(code)
checker = AirflowRuntimeVaryingValueChecker()
checker.visit(tree)
return checker.static_check_result.warnings
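    # Note: the DAG snippets in these tests are only parsed with ast.parse()
    # and walked by the checker; they are never imported or executed.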
def test_antipattern__dynamic_dag_id_with_timestamp(self):
"""ANTI-PATTERN: Using timestamps in Dag IDs."""
code = """
from airflow import DAG
from datetime import datetime
# BAD: Dag ID includes current timestamp
dag = DAG(dag_id=f"report_{datetime.now().strftime('%Y%m%d_%H%M%S')}")
"""
warnings = self._check_code(code)
assert len(warnings) == 1
assert any("datetime.now()" in w.code for w in warnings)
def test_define_dag_with_block(self):
code = """
from airflow import DAG
from airflow.operators.python import PythonOperator
from airflow.operators.bash import BashOperator
import uuid
from datetime import datetime as dt
start_date = dt.now()
default_args = {
'start_date': start_date
}
with DAG(
dag_id="my_dag",
default_args=default_args # !problem
) as dag, Test(default_args=default_args) as test:
task1 = PythonOperator(
task_id=f"task_{uuid.uuid4()}", # !problem
python_callable=lambda: None
)
task2 = BashOperator(
task_id=f"task_{dt.now()}" # !problem
)
task3 = BashOperator(
task_id="task_for_normal_case"
)
task1 >> task2 >> task3
"""
warnings = self._check_code(code)
assert len(warnings) == 3
assert any("uuid.uuid4()" in w.code for w in warnings)
assert any("dt.now()" in w.code for w in warnings)
assert any("default_args" in w.code for w in warnings)
def test_correct_pattern__static_dag_with_runtime_context(self):
code = """
from airflow import DAG
from airflow.models.param import Param
from airflow.operators.bash import BashOperator
from datetime import datetime
from mydule import test_function
import time
current_timestamp = time.time()
local_time = time.localtime()
dag = DAG(
dag_id='time_module_dag',
start_date=datetime(2024, 1, 1),
schedule_interval='@daily',
params={
"execution_date": Param(
default=f"manual_run_{datetime.now().isoformat()}", # !problem
description="Unique identifier for the run",
type="string",
minLength=10,
)
},
)
b = test_function(time=current_timestamp)
task1 = BashOperator(
task_id='time_task',
bash_command=f'echo "Timestamp: {current_timestamp}"', # !problem
dag=dag,
)
task2 = BashOperator(
task_id='time_task2',
dag=dag,
)
task1 >> task2
"""
warnings = self._check_code(code)
assert len(warnings) == 2
assert any("Param(default=f'manual_run_{datetime.now().isoformat()}'" in w.code for w in warnings)
assert any("current_timestamp" in w.code for w in warnings)
def test_dag_decorator_pattern__currently_not_detected(self):
"""
PATTERN: @dag decorator usage
"""
code = """
from airflow.decorators import dag, task
from datetime import datetime
@dag(dag_id=f"my_dag_{datetime.now()}") # !problem
def my_dag_function():
@task
def my_task():
return "hello"
my_task()
"""
warnings = self._check_code(code)
assert len(warnings) == 1
def test_dag_generated_in_for_or_function_statement(self):
code = """
from airflow import DAG
from airflow.operators.bash import BashOperator
from datetime import datetime
import pendulum
def create_dag(dag_id, task_id):
default_args = {
"depends_on_past": False,
"start_date": datetime.now()
}
with DAG(
dag_id,
default_args=default_args, # !problem
) as dag:
task1 = BashOperator(
task_id=task_id
)
return dag
now = pendulum.now()
seoul = now.in_timezone('Asia/Seoul')
for i in [datetime.now(), "3"]:
dag_id = f"dag_{i}_{random.randint(1, 1000)}"
dag = DAG(
dag_id=dag_id, # !problem
schedule_interval='@daily',
tags=[f"iteration_{i}"],
)
task1 = BashOperator(
task_id='print_bash_hello',
bash_command=f'echo "Hello from DAG {i}!"', # !problem
dag=dag,
)
task2 = BashOperator(
task_id=f'random_task_{random.randint(1, 100)}', # !problem
bash_command='echo "World"',
dag=dag,
)
task3 = BashOperator(
task_id=f'random_task_in_{seoul}', # !problem
bash_command='echo "World"',
dag=dag,
)
task1 >> task2 >> task3
"""
warnings = self._check_code(code)
assert len(warnings) == 5 | python | github | https://github.com/apache/airflow | airflow-core/tests/unit/utils/test_dag_version_inflation_checker.py |
# Copyright 2012, Nachi Ueno, NTT MCL, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import abc
import contextlib
from neutron_lib.api.definitions import port_security as psec
from neutron_lib import constants as n_const
from neutron_lib.utils import runtime
DIRECTION_IP_PREFIX = {n_const.INGRESS_DIRECTION: 'source_ip_prefix',
n_const.EGRESS_DIRECTION: 'dest_ip_prefix'}
# List of ICMPv6 types that should be permitted (ingress) by default. This list
# depends on iptables conntrack behavior of recognizing ICMP errors (types 1-4)
# as related traffic.
ICMPV6_ALLOWED_INGRESS_TYPES = (n_const.ICMPV6_TYPE_MLD_QUERY,
n_const.ICMPV6_TYPE_NS,
n_const.ICMPV6_TYPE_NA)
# List of ICMPv6 types that should be permitted (egress) by default.
ICMPV6_ALLOWED_EGRESS_TYPES = (n_const.ICMPV6_TYPE_MLD_QUERY,
n_const.ICMPV6_TYPE_RS,
n_const.ICMPV6_TYPE_NS)
# List of ICMPv6 types that should be permitted depending on payload content
# to avoid spoofing (egress) by default.
ICMPV6_RESTRICTED_EGRESS_TYPES = (n_const.ICMPV6_TYPE_NA, )
def port_sec_enabled(port):
return port.get(psec.PORTSECURITY, True)
def load_firewall_driver_class(driver):
return runtime.load_class_by_alias_or_classname(
'neutron.agent.firewall_drivers', driver)
class FirewallDriver(object, metaclass=abc.ABCMeta):
"""Firewall Driver base class.
Defines methods that any driver providing security groups
and provider firewall functionality should implement.
    Note: the port attribute should carry the security group ids and
    security group rules.
the dict of port should have
device : interface name
fixed_ips: ips of the device
mac_address: mac_address of the device
security_groups: [sgid, sgid]
security_group_rules : [ rule, rule ]
the rule must contain ethertype and direction
the rule may contain security_group_id,
protocol, port_min, port_max
source_ip_prefix, source_port_min,
source_port_max, dest_ip_prefix, and
remote_group_id
Note: source_group_ip in REST API should be converted by this rule
if direction is ingress:
remote_group_ip will be a source_ip_prefix
if direction is egress:
remote_group_ip will be a dest_ip_prefix
Note: remote_group_id in REST API should be converted by this rule
if direction is ingress:
remote_group_id will be a list of source_ip_prefix
if direction is egress:
remote_group_id will be a list of dest_ip_prefix
remote_group_id will also require membership update management
"""
# OVS agent installs arp spoofing openflow rules. If firewall is capable
# of handling that, ovs agent doesn't need to install the protection.
provides_arp_spoofing_protection = False
@abc.abstractmethod
def prepare_port_filter(self, port):
"""Prepare filters for the port.
This method should be called before the port is created.
"""
def apply_port_filter(self, port):
"""Apply port filter.
Once this method returns, the port should be firewalled
appropriately. This method should as far as possible be a
no-op. It's vastly preferred to get everything set up in
prepare_port_filter.
"""
raise NotImplementedError()
@abc.abstractmethod
def update_port_filter(self, port):
"""Refresh security group rules from data store
Gets called when a port gets added to or removed from
the security group the port is a member of or if the
group gains or loses a rule.
"""
def remove_port_filter(self, port):
"""Stop filtering port."""
raise NotImplementedError()
def filter_defer_apply_on(self):
"""Defer application of filtering rule."""
pass
def filter_defer_apply_off(self):
"""Turn off deferral of rules and apply the rules now."""
pass
@property
def ports(self):
"""Returns filtered ports."""
pass
@contextlib.contextmanager
def defer_apply(self):
"""Defer apply context."""
self.filter_defer_apply_on()
try:
yield
finally:
self.filter_defer_apply_off()
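# Illustrative usage sketch (not part of the original class): batching
# several rule updates so that they are applied once, when the block exits.
#
#   with driver.defer_apply():
#       driver.update_port_filter(port_a)
#       driver.update_port_filter(port_b)
#   # deferral is switched off here and the accumulated rules are applied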
def update_security_group_members(self, sg_id, ips):
"""Update group members in a security group."""
raise NotImplementedError()
def update_security_group_rules(self, sg_id, rules):
"""Update rules in a security group."""
raise NotImplementedError()
def security_group_updated(self, action_type, sec_group_ids,
device_id=None):
"""Called when a security group is updated.
Note: This method needs to be implemented by the firewall drivers
which use enhanced RPC for security_groups.
"""
raise NotImplementedError()
def process_trusted_ports(self, port_ids):
"""Process ports that are trusted and shouldn't be filtered."""
pass
def remove_trusted_ports(self, port_ids):
pass
class NoopFirewallDriver(FirewallDriver):
"""Noop Firewall Driver.
Firewall driver which does nothing.
This driver is for disabling the firewall functionality.
"""
def prepare_port_filter(self, port):
pass
def apply_port_filter(self, port):
pass
def update_port_filter(self, port):
pass
def remove_port_filter(self, port):
pass
def filter_defer_apply_on(self):
pass
def filter_defer_apply_off(self):
pass
@property
def ports(self):
return {}
def update_security_group_members(self, sg_id, ips):
pass
def update_security_group_rules(self, sg_id, rules):
pass
def security_group_updated(self, action_type, sec_group_ids,
device_id=None):
pass | unknown | codeparrot/codeparrot-clean | ||
<?php
/*
* This file is part of the Symfony package.
*
* (c) Fabien Potencier <fabien@symfony.com>
*
* For the full copyright and license information, please view the LICENSE
* file that was distributed with this source code.
*/
namespace Symfony\Bundle\FrameworkBundle\Controller;
use Symfony\Component\HttpKernel\Controller\ContainerControllerResolver;
/**
* @author Fabien Potencier <fabien@symfony.com>
*
* @final
*/
class ControllerResolver extends ContainerControllerResolver
{
protected function instantiateController(string $class): object
{
$controller = parent::instantiateController($class);
if ($controller instanceof AbstractController) {
if (null === $previousContainer = $controller->setContainer($this->container)) {
throw new \LogicException(\sprintf('"%s" has no container set, did you forget to define it as a service subscriber?', $class));
}
$controller->setContainer($previousContainer);
}
return $controller;
}
} | php | github | https://github.com/symfony/symfony | src/Symfony/Bundle/FrameworkBundle/Controller/ControllerResolver.php |
package daemon
import (
"context"
"fmt"
"github.com/containerd/log"
"github.com/moby/moby/api/types/events"
"github.com/moby/moby/v2/daemon/container"
"github.com/moby/moby/v2/errdefs"
)
// ContainerPause pauses a container
func (daemon *Daemon) ContainerPause(name string) error {
ctr, err := daemon.GetContainer(name)
if err != nil {
return err
}
return daemon.containerPause(ctr)
}
// containerPause pauses the container execution without stopping the process.
// The execution can be resumed by calling containerUnpause.
func (daemon *Daemon) containerPause(container *container.Container) error {
container.Lock()
defer container.Unlock()
// We cannot Pause the container which is not running
tsk, err := container.GetRunningTask()
if err != nil {
return err
}
// We cannot Pause the container which is already paused
if container.State.Paused {
return errdefs.Conflict(fmt.Errorf("container %s is already paused", container.ID))
}
// We cannot Pause the container which is restarting
if container.State.Restarting {
return errContainerIsRestarting(container.ID)
}
if err := tsk.Pause(context.Background()); err != nil {
return fmt.Errorf("cannot pause container %s: %s", container.ID, err)
}
container.State.Paused = true
daemon.setStateCounter(container)
daemon.updateHealthMonitor(container)
daemon.LogContainerEvent(container, events.ActionPause)
if err := container.CheckpointTo(context.WithoutCancel(context.TODO()), daemon.containersReplica); err != nil {
log.G(context.TODO()).WithError(err).Warn("could not save container to disk")
}
return nil
} | go | github | https://github.com/moby/moby | daemon/pause.go |
# pylint: disable=missing-docstring,import-error,unused-import,assignment-from-no-return
# pylint: disable=invalid-name, too-few-public-methods
from __future__ import print_function
from UNINFERABLE import uninferable_func
try:
from functools import singledispatch
except ImportError:
from singledispatch import singledispatch
my_single_dispatch = singledispatch
class FakeSingleDispatch(object):
@staticmethod
def register(function):
return function
def __call__(self, function):
return function
fake_singledispatch_decorator = FakeSingleDispatch()
@singledispatch
def func(arg):
return arg
@func.register(str)
def _(arg):
return 42
@func.register(float)
@func.register(int)
def _(arg):
return 42
@my_single_dispatch
def func2(arg):
return arg
@func2.register(int)
def _(arg):
return 42
@singledispatch
def with_extra_arg(arg, verbose=False):
if verbose:
print(arg)
return arg
@with_extra_arg.register(str)
def _(arg, verbose=False):
unused = 42 # [unused-variable]
return arg[::-1]
@fake_singledispatch_decorator
def not_single_dispatch(arg): # [unused-argument]
return 'not yet implemented'
@fake_singledispatch_decorator.register(str)
def bad_single_dispatch(arg): # [unused-argument]
return 42
@fake_singledispatch_decorator.register(str)
def bad_single_dispatch(arg): # [unused-argument, function-redefined]
return 24 | unknown | codeparrot/codeparrot-clean | ||
import numpy as np
import theano.tensor as T
from numpy import linalg as la, random as rnd
import pymanopt
from pymanopt.manifolds import Oblique
from pymanopt.solvers import TrustRegions
def rank_k_correlation_matrix_approximation(A, k):
"""
Returns the matrix with unit-norm columns that is closest to A w.r.t. the
Frobenius norm.
"""
m, n = A.shape
assert m == n, "matrix must be square"
assert np.allclose(np.sum(A - A.T), 0), "matrix must be symmetric"
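# Note (added for clarity, not in the original example): Oblique(k, n) is the
# manifold of k-by-n matrices with unit-norm columns, so C = X.T @ X always has
# ones on its diagonal. Minimising 0.25 * ||X.T X - A||_F^2 over this manifold
# therefore yields a correlation matrix of rank at most k that is closest to A
# in the Frobenius norm.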
manifold = Oblique(k, n)
solver = TrustRegions()
X = T.matrix()
@pymanopt.function.Theano(X)
def cost(X):
return 0.25 * T.sum((T.dot(X.T, X) - A) ** 2)
problem = pymanopt.Problem(manifold, cost)
return solver.solve(problem)
if __name__ == "__main__":
# Generate random problem data.
n = 10
k = 3
A = rnd.randn(n, n)
A = 0.5 * (A + A.T)
# Solve the problem with pymanopt.
Xopt = rank_k_correlation_matrix_approximation(A, k)
C = Xopt.T.dot(Xopt)
[w, _] = la.eig(C)
# Print information about the solution.
print('')
print("diagonal:", np.diag(C))
print("trace:", np.trace(C))
print("rank:", la.matrix_rank(C)) | unknown | codeparrot/codeparrot-clean | ||
from functools import partial
import sys
from threading import Thread
try:
import cPickle as pickle
except:
import pickle
import uwsgi
if uwsgi.masterpid() == 0:
raise Exception(
"you have to enable the uWSGI master process to use this module")
spooler_functions = {}
mule_functions = {}
postfork_chain = []
# Python3 compatibility
def _encode1(val):
if sys.version_info >= (3, 0) and isinstance(val, str):
return val.encode('utf-8')
else:
return val
def _decode1(val):
if sys.version_info >= (3, 0) and isinstance(val, bytes):
return val.decode('utf-8')
else:
return val
def _encode_to_spooler(vars):
return dict((_encode1(K), _encode1(V)) for (K, V) in vars.items())
def _decode_from_spooler(vars):
return dict((_decode1(K), _decode1(V)) for (K, V) in vars.items())
def get_free_signal():
for signum in range(0, 256):
if not uwsgi.signal_registered(signum):
return signum
raise Exception("No free uwsgi signal available")
def manage_spool_request(vars):
# To check whether 'args' is in vars or not, decode the keys first,
# because in Python 3 all keys in 'vars' have 'bytes' type.
vars = dict((_decode1(K), V) for (K, V) in vars.items())
if 'args' in vars:
for k in ('args', 'kwargs'):
vars[k] = pickle.loads(vars.pop(k))
vars = _decode_from_spooler(vars)
f = spooler_functions[vars['ud_spool_func']]
if 'args' in vars:
ret = f(*vars['args'], **vars['kwargs'])
else:
ret = f(vars)
return int(vars.get('ud_spool_ret', ret))
def postfork_chain_hook():
for f in postfork_chain:
f()
uwsgi.spooler = manage_spool_request
uwsgi.post_fork_hook = postfork_chain_hook
class postfork(object):
def __init__(self, f):
if callable(f):
self.wid = 0
self.f = f
else:
self.f = None
self.wid = f
postfork_chain.append(self)
def __call__(self, *args, **kwargs):
if self.f:
if self.wid > 0 and self.wid != uwsgi.worker_id():
return
return self.f()
self.f = args[0]
class _spoolraw(object):
def __call__(self, *args, **kwargs):
arguments = self.base_dict.copy()
if not self.pass_arguments:
if len(args) > 0:
arguments.update(args[0])
if kwargs:
arguments.update(kwargs)
else:
spooler_args = {}
for key in ('message_dict', 'spooler', 'priority', 'at', 'body'):
if key in kwargs:
spooler_args.update({key: kwargs.pop(key)})
arguments.update(spooler_args)
arguments.update(
{'args': pickle.dumps(args), 'kwargs': pickle.dumps(kwargs)})
return uwsgi.spool(_encode_to_spooler(arguments))
# For backward compatibility (uWSGI < 1.9.13)
def spool(self, *args, **kwargs):
return self.__class__.__call__(self, *args, **kwargs)
def __init__(self, f, pass_arguments):
if 'spooler' not in uwsgi.opt:
raise Exception(
"you have to enable the uWSGI spooler to use @%s decorator" % self.__class__.__name__)
self.f = f
spooler_functions[self.f.__name__] = self.f
# For backward compatibility (uWSGI < 1.9.13)
self.f.spool = self.__call__
self.pass_arguments = pass_arguments
self.base_dict = {'ud_spool_func': self.f.__name__}
class _spool(_spoolraw):
def __call__(self, *args, **kwargs):
self.base_dict['ud_spool_ret'] = str(uwsgi.SPOOL_OK)
return _spoolraw.__call__(self, *args, **kwargs)
class _spoolforever(_spoolraw):
def __call__(self, *args, **kwargs):
self.base_dict['ud_spool_ret'] = str(uwsgi.SPOOL_RETRY)
return _spoolraw.__call__(self, *args, **kwargs)
def spool_decorate(f=None, pass_arguments=False, _class=_spoolraw):
if not f:
return partial(_class, pass_arguments=pass_arguments)
return _class(f, pass_arguments)
def spoolraw(f=None, pass_arguments=False):
return spool_decorate(f, pass_arguments)
def spool(f=None, pass_arguments=False):
return spool_decorate(f, pass_arguments, _spool)
def spoolforever(f=None, pass_arguments=False):
return spool_decorate(f, pass_arguments, _spoolforever)
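# Illustrative usage sketch (not part of the original module), assuming the
# uWSGI spooler is enabled: without pass_arguments, a @spool-decorated
# function receives the spooler variables as a single dict when the task runs.
#
#   @spool
#   def long_task(arguments):
#       print(arguments['foo'])
#
#   long_task(foo='bar')  # queues the task; the spooler later calls long_task(vars)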
class mulefunc(object):
def __init__(self, f):
if callable(f):
self.fname = f.__name__
self.mule = 0
mule_functions[f.__name__] = f
else:
self.mule = f
self.fname = None
def real_call(self, *args, **kwargs):
uwsgi.mule_msg(pickle.dumps(
{
'service': 'uwsgi_mulefunc',
'func': self.fname,
'args': args,
'kwargs': kwargs
}
), self.mule)
def __call__(self, *args, **kwargs):
if not self.fname:
self.fname = args[0].__name__
mule_functions[self.fname] = args[0]
return self.real_call
return self.real_call(*args, **kwargs)
def mule_msg_dispatcher(message):
msg = pickle.loads(message)
if msg['service'] == 'uwsgi_mulefunc':
return mule_functions[msg['func']](*msg['args'], **msg['kwargs'])
uwsgi.mule_msg_hook = mule_msg_dispatcher
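# Illustrative usage sketch (not part of the original module): a call to a
# @mulefunc-decorated function is pickled, sent with uwsgi.mule_msg() and
# executed by the dispatcher above inside a mule process.
#
#   @mulefunc
#   def background(job_id):
#       print("processing", job_id)
#
#   background(42)  # runs in a mule, not in the calling worker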
class rpc(object):
def __init__(self, name):
self.name = name
def __call__(self, f):
uwsgi.register_rpc(self.name, f)
return f
class farm_loop(object):
def __init__(self, f, farm):
self.f = f
self.farm = farm
def __call__(self):
if uwsgi.mule_id() == 0:
return
if not uwsgi.in_farm(self.farm):
return
while True:
message = uwsgi.farm_get_msg()
if message:
self.f(message)
class farm(object):
def __init__(self, name=None, **kwargs):
self.name = name
def __call__(self, f):
postfork_chain.append(farm_loop(f, self.name))
class mule_brain(object):
def __init__(self, f, num):
self.f = f
self.num = num
def __call__(self):
if uwsgi.mule_id() == self.num:
try:
self.f()
except:
exc = sys.exc_info()
sys.excepthook(exc[0], exc[1], exc[2])
sys.exit(1)
class mule_brainloop(mule_brain):
def __call__(self):
if uwsgi.mule_id() == self.num:
while True:
try:
self.f()
except:
exc = sys.exc_info()
sys.excepthook(exc[0], exc[1], exc[2])
sys.exit(1)
class mule(object):
def __init__(self, num):
self.num = num
def __call__(self, f):
postfork_chain.append(mule_brain(f, self.num))
class muleloop(mule):
def __call__(self, f):
postfork_chain.append(mule_brainloop(f, self.num))
class mulemsg_loop(object):
def __init__(self, f, num):
self.f = f
self.num = num
def __call__(self):
if uwsgi.mule_id() == self.num:
while True:
message = uwsgi.mule_get_msg()
if message:
self.f(message)
class mulemsg(object):
def __init__(self, num):
self.num = num
def __call__(self, f):
postfork_chain.append(mulemsg_loop(f, self.num))
class signal(object):
def __init__(self, num, **kwargs):
self.num = num
self.target = kwargs.get('target', '')
def __call__(self, f):
uwsgi.register_signal(self.num, self.target, f)
return f
class timer(object):
def __init__(self, secs, **kwargs):
self.num = kwargs.get('signum', get_free_signal())
self.secs = secs
self.target = kwargs.get('target', '')
def __call__(self, f):
uwsgi.register_signal(self.num, self.target, f)
uwsgi.add_timer(self.num, self.secs)
return f
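# Illustrative usage sketch (not part of the original module): run a handler
# every few seconds; uWSGI passes the registered signal number to the handler.
#
#   @timer(10)
#   def every_ten_seconds(signum):
#       print("tick", signum)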
class cron(object):
def __init__(self, minute, hour, day, month, dayweek, **kwargs):
self.num = kwargs.get('signum', get_free_signal())
self.minute = minute
self.hour = hour
self.day = day
self.month = month
self.dayweek = dayweek
self.target = kwargs.get('target', '')
def __call__(self, f):
uwsgi.register_signal(self.num, self.target, f)
uwsgi.add_cron(self.num, self.minute, self.hour,
self.day, self.month, self.dayweek)
return f
class rbtimer(object):
def __init__(self, secs, **kwargs):
self.num = kwargs.get('signum', get_free_signal())
self.secs = secs
self.target = kwargs.get('target', '')
def __call__(self, f):
uwsgi.register_signal(self.num, self.target, f)
uwsgi.add_rb_timer(self.num, self.secs)
return f
class filemon(object):
def __init__(self, fsobj, **kwargs):
self.num = kwargs.get('signum', get_free_signal())
self.fsobj = fsobj
self.target = kwargs.get('target', '')
def __call__(self, f):
uwsgi.register_signal(self.num, self.target, f)
uwsgi.add_file_monitor(self.num, self.fsobj)
return f
class erlang(object):
def __init__(self, name):
self.name = name
def __call__(self, f):
uwsgi.erlang_register_process(self.name, f)
return f
class lock(object):
def __init__(self, f):
self.f = f
def __call__(self, *args, **kwargs):
# ensure the spooler will not call it
if uwsgi.i_am_the_spooler():
return
uwsgi.lock()
try:
return self.f(*args, **kwargs)
finally:
uwsgi.unlock()
class thread(object):
def __init__(self, f):
self.f = f
def __call__(self, *args):
t = Thread(target=self.f, args=args)
t.daemon = True
t.start()
return self.f
class harakiri(object):
def __init__(self, seconds):
self.s = seconds
def real_call(self, *args, **kwargs):
uwsgi.set_user_harakiri(self.s)
r = self.f(*args, **kwargs)
uwsgi.set_user_harakiri(0)
return r
def __call__(self, f):
self.f = f
return self.real_call | unknown | codeparrot/codeparrot-clean | ||
# Copyright 2019 Fortinet, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <https://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import json
import pytest
from mock import ANY
from ansible.module_utils.network.fortios.fortios import FortiOSHandler
try:
from ansible.modules.network.fortios import fortios_system_proxy_arp
except ImportError:
pytest.skip("Could not load required modules for testing", allow_module_level=True)
@pytest.fixture(autouse=True)
def connection_mock(mocker):
connection_class_mock = mocker.patch('ansible.modules.network.fortios.fortios_system_proxy_arp.Connection')
return connection_class_mock
fos_instance = FortiOSHandler(connection_mock)
def test_system_proxy_arp_creation(mocker):
schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
set_method_result = {'status': 'success', 'http_method': 'POST', 'http_status': 200}
set_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.set', return_value=set_method_result)
input_data = {
'username': 'admin',
'state': 'present',
'system_proxy_arp': {
'end_ip': 'test_value_3',
'id': '4',
'interface': 'test_value_5',
'ip': 'test_value_6'
},
'vdom': 'root'}
is_error, changed, response = fortios_system_proxy_arp.fortios_system(input_data, fos_instance)
expected_data = {
'end-ip': 'test_value_3',
'id': '4',
'interface': 'test_value_5',
'ip': 'test_value_6'
}
set_method_mock.assert_called_with('system', 'proxy-arp', data=expected_data, vdom='root')
schema_method_mock.assert_not_called()
assert not is_error
assert changed
assert response['status'] == 'success'
assert response['http_status'] == 200
def test_system_proxy_arp_creation_fails(mocker):
schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
set_method_result = {'status': 'error', 'http_method': 'POST', 'http_status': 500}
set_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.set', return_value=set_method_result)
input_data = {
'username': 'admin',
'state': 'present',
'system_proxy_arp': {
'end_ip': 'test_value_3',
'id': '4',
'interface': 'test_value_5',
'ip': 'test_value_6'
},
'vdom': 'root'}
is_error, changed, response = fortios_system_proxy_arp.fortios_system(input_data, fos_instance)
expected_data = {
'end-ip': 'test_value_3',
'id': '4',
'interface': 'test_value_5',
'ip': 'test_value_6'
}
set_method_mock.assert_called_with('system', 'proxy-arp', data=expected_data, vdom='root')
schema_method_mock.assert_not_called()
assert is_error
assert not changed
assert response['status'] == 'error'
assert response['http_status'] == 500
def test_system_proxy_arp_removal(mocker):
schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
delete_method_result = {'status': 'success', 'http_method': 'POST', 'http_status': 200}
delete_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.delete', return_value=delete_method_result)
input_data = {
'username': 'admin',
'state': 'absent',
'system_proxy_arp': {
'end_ip': 'test_value_3',
'id': '4',
'interface': 'test_value_5',
'ip': 'test_value_6'
},
'vdom': 'root'}
is_error, changed, response = fortios_system_proxy_arp.fortios_system(input_data, fos_instance)
delete_method_mock.assert_called_with('system', 'proxy-arp', mkey=ANY, vdom='root')
schema_method_mock.assert_not_called()
assert not is_error
assert changed
assert response['status'] == 'success'
assert response['http_status'] == 200
def test_system_proxy_arp_deletion_fails(mocker):
schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
delete_method_result = {'status': 'error', 'http_method': 'POST', 'http_status': 500}
delete_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.delete', return_value=delete_method_result)
input_data = {
'username': 'admin',
'state': 'absent',
'system_proxy_arp': {
'end_ip': 'test_value_3',
'id': '4',
'interface': 'test_value_5',
'ip': 'test_value_6'
},
'vdom': 'root'}
is_error, changed, response = fortios_system_proxy_arp.fortios_system(input_data, fos_instance)
delete_method_mock.assert_called_with('system', 'proxy-arp', mkey=ANY, vdom='root')
schema_method_mock.assert_not_called()
assert is_error
assert not changed
assert response['status'] == 'error'
assert response['http_status'] == 500
def test_system_proxy_arp_idempotent(mocker):
schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
set_method_result = {'status': 'error', 'http_method': 'DELETE', 'http_status': 404}
set_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.set', return_value=set_method_result)
input_data = {
'username': 'admin',
'state': 'present',
'system_proxy_arp': {
'end_ip': 'test_value_3',
'id': '4',
'interface': 'test_value_5',
'ip': 'test_value_6'
},
'vdom': 'root'}
is_error, changed, response = fortios_system_proxy_arp.fortios_system(input_data, fos_instance)
expected_data = {
'end-ip': 'test_value_3',
'id': '4',
'interface': 'test_value_5',
'ip': 'test_value_6'
}
set_method_mock.assert_called_with('system', 'proxy-arp', data=expected_data, vdom='root')
schema_method_mock.assert_not_called()
assert not is_error
assert not changed
assert response['status'] == 'error'
assert response['http_status'] == 404
def test_system_proxy_arp_filter_foreign_attributes(mocker):
schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
set_method_result = {'status': 'success', 'http_method': 'POST', 'http_status': 200}
set_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.set', return_value=set_method_result)
input_data = {
'username': 'admin',
'state': 'present',
'system_proxy_arp': {
'random_attribute_not_valid': 'tag',
'end_ip': 'test_value_3',
'id': '4',
'interface': 'test_value_5',
'ip': 'test_value_6'
},
'vdom': 'root'}
is_error, changed, response = fortios_system_proxy_arp.fortios_system(input_data, fos_instance)
expected_data = {
'end-ip': 'test_value_3',
'id': '4',
'interface': 'test_value_5',
'ip': 'test_value_6'
}
set_method_mock.assert_called_with('system', 'proxy-arp', data=expected_data, vdom='root')
schema_method_mock.assert_not_called()
assert not is_error
assert changed
assert response['status'] == 'success'
assert response['http_status'] == 200 | unknown | codeparrot/codeparrot-clean | ||
# frozen_string_literal: true
require "cases/helper"
require "models/post"
require "models/author"
require "models/account"
require "models/categorization"
require "models/comment"
require "models/company"
require "models/tagging"
require "models/topic"
require "models/reply"
require "models/rating"
require "models/entrant"
require "models/project"
require "models/developer"
require "models/computer"
require "models/customer"
require "models/toy"
require "models/matey"
require "models/dog_lover"
require "models/dog"
require "models/car"
require "models/tire"
require "models/subscriber"
require "models/non_primary_key"
require "models/clothing_item"
require "models/cpk"
require "models/edge"
require "support/stubs/strong_parameters"
require "support/async_helper"
class FinderTest < ActiveRecord::TestCase
include AsyncHelper
fixtures :companies, :topics, :entrants, :developers, :developers_projects,
:posts, :comments, :accounts, :authors, :author_addresses, :customers,
:categories, :categorizations, :cars, :clothing_items, :cpk_books, :cpk_reviews
def test_find_by_id_with_hash
assert_nothing_raised do
Post.find_by_id(limit: 1)
end
end
def test_find_by_title_and_id_with_hash
assert_nothing_raised do
Post.find_by_title_and_id("foo", limit: 1)
end
end
def test_find
assert_equal(topics(:first).title, Topic.find(1).title)
end
def test_find_with_hash_parameter
assert_raises(ActiveRecord::RecordNotFound) { Post.find(foo: "bar") }
assert_raises(ActiveRecord::RecordNotFound) { Post.find(foo: "bar", bar: "baz") }
end
def test_find_with_custom_select_excluding_id
# Returns ordered by ids array
topics = Topic.select(:title).find([4, 2, 5])
assert_equal [4, 2, 5], topics.map(&:id)
# Custom order
topics = Topic.select(:title).order(:id).find([4, 2, 5])
assert_equal [2, 4, 5], topics.map(&:id)
end
def test_find_with_proc_parameter_and_block
exception = assert_raises(RuntimeError) do
Topic.all.find(-> { raise "should happen" }) { |e| e.title == "non-existing-title" }
end
assert_equal "should happen", exception.message
assert_nothing_raised do
Topic.all.find(-> { raise "should not happen" }) { |e| e.title == topics(:first).title }
end
end
def test_find_with_ids_returning_ordered
records = Topic.find([4, 2, 5])
assert_equal "The Fourth Topic of the day", records[0].title
assert_equal "The Second Topic of the day", records[1].title
assert_equal "The Fifth Topic of the day", records[2].title
records = Topic.find(4, 2, 5)
assert_equal "The Fourth Topic of the day", records[0].title
assert_equal "The Second Topic of the day", records[1].title
assert_equal "The Fifth Topic of the day", records[2].title
records = Topic.find(["4", "2", "5"])
assert_equal "The Fourth Topic of the day", records[0].title
assert_equal "The Second Topic of the day", records[1].title
assert_equal "The Fifth Topic of the day", records[2].title
records = Topic.find("4", "2", "5")
assert_equal "The Fourth Topic of the day", records[0].title
assert_equal "The Second Topic of the day", records[1].title
assert_equal "The Fifth Topic of the day", records[2].title
end
def test_find_with_ids_and_order_clause
# The order clause takes precedence over the informed ids
records = Topic.order(:author_name).find([5, 3, 1])
assert_equal "The Third Topic of the day", records[0].title
assert_equal "The First Topic", records[1].title
assert_equal "The Fifth Topic of the day", records[2].title
records = Topic.order(:id).find([5, 3, 1])
assert_equal "The First Topic", records[0].title
assert_equal "The Third Topic of the day", records[1].title
assert_equal "The Fifth Topic of the day", records[2].title
end
def test_find_with_ids_with_limit_and_order_clause
# The order clause takes precedence over the informed ids
records = Topic.limit(2).order(:id).find([5, 3, 1])
assert_equal 2, records.size
assert_equal "The First Topic", records[0].title
assert_equal "The Third Topic of the day", records[1].title
end
def test_find_with_ids_and_limit
records = Topic.limit(3).find([3, 2, 5, 1, 4])
assert_equal 3, records.size
assert_equal "The Third Topic of the day", records[0].title
assert_equal "The Second Topic of the day", records[1].title
assert_equal "The Fifth Topic of the day", records[2].title
end
def test_find_with_ids_where_and_limit
# Please note that Topic 1 is the only one not approved, so
# if it were among the first 3 it would raise an ActiveRecord::RecordNotFound
records = Topic.where(approved: true).limit(3).find([3, 2, 5, 1, 4])
assert_equal 3, records.size
assert_equal "The Third Topic of the day", records[0].title
assert_equal "The Second Topic of the day", records[1].title
assert_equal "The Fifth Topic of the day", records[2].title
end
def test_find_with_ids_and_offset
records = Topic.offset(2).find([3, 2, 5, 1, 4])
assert_equal 3, records.size
assert_equal "The Fifth Topic of the day", records[0].title
assert_equal "The First Topic", records[1].title
assert_equal "The Fourth Topic of the day", records[2].title
end
def test_find_with_ids_with_no_id_passed
exception = assert_raises(ActiveRecord::RecordNotFound) { Topic.find }
assert_equal "Topic", exception.model
assert_equal "id", exception.primary_key
end
def test_find_with_ids_with_id_out_of_range
exception = assert_raises(ActiveRecord::RecordNotFound) do
Topic.find("9999999999999999999999999999999")
end
assert_equal "Topic", exception.model
assert_equal "id", exception.primary_key
end
def test_find_passing_active_record_object_is_not_permitted
error = assert_raises(ArgumentError) do
Topic.find(Topic.last)
end
assert_equal "You are passing an instance of ActiveRecord::Base to `find`. " \
"Please pass the id of the object by calling `.id`.", error.message
end
def test_symbols_table_ref
gc_disabled = GC.disable
Post.where("author_id" => nil) # warm up
x = Symbol.all_symbols.count
Post.where("title" => { "xxxqqqq" => "bar" })
assert_equal x, Symbol.all_symbols.count
ensure
GC.enable if gc_disabled == false
end
# find should handle strings that come from URLs
# (example: Category.find(params[:id]))
def test_find_with_string
assert_equal(Topic.find(1).title, Topic.find("1").title)
end
def test_exists
assert_equal true, Topic.exists?(1)
assert_equal true, Topic.exists?("1")
assert_equal true, Topic.exists?(title: "The First Topic")
assert_equal true, Topic.exists?(heading: "The First Topic")
assert_equal true, Topic.exists?(author_name: "Mary", approved: true)
assert_equal true, Topic.exists?(["parent_id = ?", 1])
assert_equal true, Topic.exists?(id: [1, 9999])
assert_equal false, Topic.exists?(45)
assert_equal false, Topic.exists?(9999999999999999999999999999999)
assert_equal false, Topic.exists?(Topic.new.id)
assert_raise(ArgumentError) { Topic.exists?([1, 2]) }
end
def test_exists_with_scope
davids = Author.where(name: "David")
assert_equal true, davids.exists?
assert_equal true, davids.exists?(authors(:david).id)
assert_equal false, davids.exists?(authors(:mary).id)
assert_equal false, davids.exists?("42")
assert_equal false, davids.exists?(42)
assert_equal false, davids.exists?(davids.new.id)
fake = Author.where(name: "fake author")
assert_equal false, fake.exists?
assert_equal false, fake.exists?(authors(:david).id)
end
def test_exists_uses_existing_scope
post = authors(:david).posts.first
authors = Author.includes(:posts).where(name: "David", posts: { id: post.id })
assert_equal true, authors.exists?(authors(:david).id)
end
def test_any_with_scope_on_hash_includes
post = authors(:david).posts.first
categories = Categorization.includes(author: :posts).where(posts: { id: post.id })
assert_equal true, categories.exists?
end
def test_exists_with_polymorphic_relation
post = Post.create!(title: "Post", body: "default", taggings: [Tagging.new(comment: "tagging comment")])
relation = Post.tagged_with_comment("tagging comment")
assert_equal true, relation.exists?(title: ["Post"])
assert_equal true, relation.exists?(["title LIKE ?", "Post%"])
assert_equal true, relation.exists?
assert_equal true, relation.exists?(post.id)
assert_equal true, relation.exists?(post.id.to_s)
assert_equal false, relation.exists?(false)
end
def test_exists_with_string
assert_equal false, Subscriber.exists?("foo")
assert_equal false, Subscriber.exists?(" ")
Subscriber.create!(id: "foo")
Subscriber.create!(id: " ")
assert_equal true, Subscriber.exists?("foo")
assert_equal true, Subscriber.exists?(" ")
end
def test_exists_with_strong_parameters
assert_equal false, Subscriber.exists?(ProtectedParams.new(nick: "foo").permit!)
Subscriber.create!(nick: "foo")
assert_equal true, Subscriber.exists?(ProtectedParams.new(nick: "foo").permit!)
assert_raises(ActiveModel::ForbiddenAttributesError) do
Subscriber.exists?(ProtectedParams.new(nick: "foo"))
end
end
def test_exists_passing_active_record_object_is_not_permitted
error = assert_raises(ArgumentError) do
Topic.exists?(Topic.new)
end
assert_equal "You are passing an instance of ActiveRecord::Base to `exists?`. " \
"Please pass the id of the object by calling `.id`.", error.message
end
def test_exists_does_not_select_columns_without_alias
assert_queries_match(/SELECT 1 AS one FROM #{Regexp.escape(quote_table_name("topics"))}/i) do
Topic.exists?
end
end
def test_exists_returns_true_with_one_record_and_no_args
assert_equal true, Topic.exists?
end
def test_exists_returns_false_with_false_arg
assert_equal false, Topic.exists?(false)
end
def test_exists_with_loaded_relation
topics = Topic.all.load
assert_queries_match(/SELECT 1 AS one/i, count: 1) do
assert_predicate topics, :exists?
end
end
def test_exists_with_empty_loaded_relation
Topic.delete_all
topics = Topic.all.load
assert_queries_match(/SELECT 1 AS one/i, count: 1) do
assert_not_predicate topics, :exists?
end
end
def test_exists_with_loaded_relation_having_unsaved_records
author = authors(:david)
posts = author.posts.load
assert_not_empty posts
posts.each(&:destroy)
assert_queries_match(/SELECT 1 AS one/i) do
assert_not_predicate posts, :exists?
end
end
def test_exists_with_loaded_relation_having_updated_owner_record
author = authors(:david)
assert_not_empty author.posts
author.posts.each do |post|
post.author = nil
post.save!
end
assert_queries_count(1) do
assert_not_predicate author.posts, :exists?
end
end
# exists? should handle nil for id's that come from URLs and always return false
# (example: Topic.exists?(params[:id])) where params[:id] is nil
def test_exists_with_nil_arg
assert_equal false, Topic.exists?(nil)
assert_equal true, Topic.exists?
assert_equal false, Topic.first.replies.exists?(nil)
assert_equal true, Topic.first.replies.exists?
end
def test_exists_with_empty_hash_arg
assert_equal true, Topic.exists?({})
end
def test_exists_with_distinct_and_offset_and_joins
assert_predicate Post.left_joins(:comments).distinct.offset(10), :exists?
assert_not Post.left_joins(:comments).distinct.offset(11).exists?
end
def test_exists_with_distinct_and_offset_and_select
assert_predicate Post.select(:body).distinct.offset(4), :exists?
assert_not Post.select(:body).distinct.offset(5).exists?
end
def test_exists_with_distinct_and_offset_and_eagerload_and_order
assert_predicate Post.eager_load(:comments).distinct.offset(10).merge(Comment.order(post_id: :asc)), :exists?
assert_not Post.eager_load(:comments).distinct.offset(11).merge(Comment.order(post_id: :asc)).exists?
end
# Ensure +exists?+ runs without an error by excluding distinct value.
# See https://github.com/rails/rails/pull/26981.
def test_exists_with_order_and_distinct
assert_equal true, Topic.order(:id).distinct.exists?
end
# Ensure +exists?+ runs without an error by excluding order value.
def test_exists_with_order
assert_equal true, Topic.order(Arel.sql("invalid sql here")).exists?
end
def test_exists_with_large_number
assert_equal true, Topic.where(id: [1, 9223372036854775808]).exists?
assert_equal true, Topic.where(id: 1..9223372036854775808).exists?
assert_equal true, Topic.where(id: -9223372036854775809..9223372036854775808).exists?
assert_equal false, Topic.where(id: 9223372036854775808..9223372036854775809).exists?
assert_equal false, Topic.where(id: -9223372036854775810..-9223372036854775809).exists?
assert_equal false, Topic.where(id: 9223372036854775808..1).exists?
assert_equal true, Topic.where(id: 1).or(Topic.where(id: 9223372036854775808)).exists?
assert_equal true, Topic.where.not(id: 9223372036854775808).exists?
attr = Topic.predicate_builder
assert_predicate Topic.where(attr[:id, -9223372036854775809, :gt]), :exists?
assert_predicate Topic.where(attr[:id, -9223372036854775809, :gteq]), :exists?
assert_predicate Topic.where(attr[:id, 9223372036854775808, :lt]), :exists?
assert_predicate Topic.where(attr[:id, 9223372036854775808, :lteq]), :exists?
assert_not_predicate Topic.where(attr[:id, 9223372036854775808, :gt]), :exists?
assert_not_predicate Topic.where(attr[:id, 9223372036854775808, :gteq]), :exists?
assert_not_predicate Topic.where(attr[:id, -9223372036854775809, :lt]), :exists?
assert_not_predicate Topic.where(attr[:id, -9223372036854775809, :lteq]), :exists?
end
def test_exists_with_joins
assert_equal true, Topic.joins(:replies).where(replies_topics: { approved: true }).order("replies_topics.created_at DESC").exists?
end
def test_exists_with_left_joins
assert_equal true, Topic.left_joins(:replies).where(replies_topics: { approved: true }).order("replies_topics.created_at DESC").exists?
end
def test_exists_with_eager_load
assert_equal true, Topic.eager_load(:replies).where(replies_topics: { approved: true }).order("replies_topics.created_at DESC").exists?
end
def test_exists_with_includes_limit_and_empty_result
assert_no_queries { assert_equal false, Topic.includes(:replies).limit(0).exists? }
assert_queries_count(1) { assert_equal false, Topic.includes(:replies).limit(1).where("0 = 1").exists? }
end
def test_exists_with_distinct_association_includes_and_limit
author = Author.first
unique_categorized_posts = author.unique_categorized_posts.includes(:special_comments)
assert_no_queries { assert_equal false, unique_categorized_posts.limit(0).exists? }
assert_queries_count(1) { assert_equal true, unique_categorized_posts.limit(1).exists? }
end
def test_exists_with_distinct_association_includes_limit_and_order
author = Author.first
unique_categorized_posts = author.unique_categorized_posts.includes(:special_comments).order("comments.tags_count DESC")
assert_no_queries { assert_equal false, unique_categorized_posts.limit(0).exists? }
assert_queries_count(1) { assert_equal true, unique_categorized_posts.limit(1).exists? }
end
def test_exists_should_reference_correct_aliases_while_joining_tables_of_has_many_through_association
ratings = developers(:david).ratings.includes(comment: :post).where(posts: { id: 1 })
assert_queries_count(1) { assert_not_predicate ratings.limit(1), :exists? }
end
def test_exists_with_empty_table_and_no_args_given
Topic.delete_all
assert_equal false, Topic.exists?
end
def test_exists_with_aggregate_having_three_mappings
existing_address = customers(:david).address
assert_equal true, Customer.exists?(address: existing_address)
end
def test_exists_with_aggregate_having_three_mappings_with_one_difference
existing_address = customers(:david).address
assert_equal false, Customer.exists?(address: Address.new(existing_address.street, existing_address.city, existing_address.country + "1"))
assert_equal false, Customer.exists?(address: Address.new(existing_address.street, existing_address.city + "1", existing_address.country))
assert_equal false, Customer.exists?(address: Address.new(existing_address.street + "1", existing_address.city, existing_address.country))
end
def test_exists_does_not_instantiate_records
assert_not_called(Developer, :instantiate) do
Developer.exists?
end
end
def test_include_when_non_AR_object_passed_on_unloaded_relation
assert_no_queries do
assert_equal false, Customer.where(name: "David").include?("I'm not an AR object")
end
end
def test_include_when_non_AR_object_passed_on_loaded_relation
customers = Customer.where(name: "David").load
assert_no_queries do
assert_equal false, customers.include?("I'm not an AR object")
end
end
def test_member_when_non_AR_object_passed_on_unloaded_relation
assert_no_queries do
assert_equal false, Customer.where(name: "David").member?("I'm not an AR object")
end
end
def test_member_when_non_AR_object_passed_on_loaded_relation
customers = Customer.where(name: "David").load
assert_no_queries do
assert_equal false, customers.member?("I'm not an AR object")
end
end
def test_include_on_unloaded_relation_with_match
assert_queries_match(/1 AS one.*LIMIT/) do
assert_equal true, Customer.where(name: "David").include?(customers(:david))
end
end
def test_include_on_unloaded_relation_without_match
assert_queries_match(/1 AS one.*LIMIT/) do
assert_equal false, Customer.where(name: "David").include?(customers(:mary))
end
end
def test_include_on_unloaded_relation_with_mismatched_class
topic = topics(:first)
assert Customer.exists?(topic.id)
assert_no_queries do
assert_equal false, Customer.where(name: "David").include?(topic)
end
end
def test_include_on_unloaded_relation_with_offset
assert_queries_match(/ORDER BY name ASC/) do
assert_equal true, Customer.offset(1).order("name ASC").include?(customers(:mary))
end
end
def test_include_on_unloaded_relation_with_limit
mary = customers(:mary)
barney = customers(:barney)
david = customers(:david)
assert_equal false, Customer.order(id: :desc).limit(2).include?(david)
assert_equal true, Customer.order(id: :desc).limit(2).include?(barney)
assert_equal true, Customer.order(id: :desc).limit(2).include?(mary)
end
def test_include_on_unloaded_relation_with_having_referencing_aliased_select
skip if current_adapter?(:PostgreSQLAdapter)
bob = authors(:bob)
mary = authors(:mary)
assert_equal false, Author.select("COUNT(*) as total_posts", "authors.*").joins(:posts).group(:id).having("total_posts > 2").include?(bob)
assert_equal true, Author.select("COUNT(*) as total_posts", "authors.*").joins(:posts).group(:id).having("total_posts > 2").include?(mary)
end
def test_include_on_loaded_relation_with_match
customers = Customer.where(name: "David").load
david = customers(:david)
assert_no_queries do
assert_equal true, customers.include?(david)
end
end
def test_include_on_loaded_relation_without_match
customers = Customer.where(name: "David").load
mary = customers(:mary)
assert_no_queries do
assert_equal false, customers.include?(mary)
end
end
def test_include_on_unloaded_relation_with_composite_primary_key
assert_queries_match(/1 AS one.*LIMIT/) do
book = cpk_books(:cpk_great_author_first_book)
assert Cpk::Book.where(title: "The first book").include?(book)
end
end
def test_include_on_loaded_relation_with_composite_primary_key
books = Cpk::Book.where(title: "The first book").load
great_author_book = cpk_books(:cpk_great_author_first_book)
assert_no_queries do
assert books.include?(great_author_book)
end
end
def test_member_on_unloaded_relation_with_match
assert_queries_match(/1 AS one.*LIMIT/) do
assert_equal true, Customer.where(name: "David").member?(customers(:david))
end
end
def test_member_on_unloaded_relation_without_match
assert_queries_match(/1 AS one.*LIMIT/) do
assert_equal false, Customer.where(name: "David").member?(customers(:mary))
end
end
def test_member_on_unloaded_relation_with_mismatched_class
topic = topics(:first)
assert Customer.exists?(topic.id)
assert_no_queries do
assert_equal false, Customer.where(name: "David").member?(topic)
end
end
def test_member_on_unloaded_relation_with_offset
assert_queries_match(/ORDER BY name ASC/) do
assert_equal true, Customer.offset(1).order("name ASC").member?(customers(:mary))
end
end
def test_member_on_unloaded_relation_with_limit
mary = customers(:mary)
barney = customers(:barney)
david = customers(:david)
assert_equal false, Customer.order(id: :desc).limit(2).member?(david)
assert_equal true, Customer.order(id: :desc).limit(2).member?(barney)
assert_equal true, Customer.order(id: :desc).limit(2).member?(mary)
end
def test_member_on_loaded_relation_with_match
customers = Customer.where(name: "David").load
david = customers(:david)
assert_no_queries do
assert_equal true, customers.member?(david)
end
end
def test_member_on_loaded_relation_without_match
customers = Customer.where(name: "David").load
mary = customers(:mary)
assert_no_queries do
assert_equal false, customers.member?(mary)
end
end
def test_member_on_unloaded_relation_with_composite_primary_key
assert_queries_match(/1 AS one.*LIMIT/) do
book = cpk_books(:cpk_great_author_first_book)
assert Cpk::Book.where(title: "The first book").member?(book)
end
end
def test_member_on_loaded_relation_with_composite_primary_key
books = Cpk::Book.where(title: "The first book").load
great_author_book = cpk_books(:cpk_great_author_first_book)
assert_no_queries do
assert books.member?(great_author_book)
end
end
def test_find_by_array_of_one_id
assert_kind_of(Array, Topic.find([ 1 ]))
assert_equal(1, Topic.find([ 1 ]).length)
end
def test_find_by_ids
assert_equal 2, Topic.find(1, 2).size
assert_equal topics(:second).title, Topic.find([2]).first.title
end
def test_find_by_ids_with_limit_and_offset
assert_equal 2, Entrant.limit(2).find([1, 3, 2]).size
entrants = Entrant.limit(3).offset(2).find([1, 3, 2])
assert_equal 1, entrants.size
assert_equal "Ruby Guru", entrants.first.name
# Also test an edge case: If you have 11 results, and you set a
# limit of 3 and offset of 9, then you should find that there
# will be only 2 results, regardless of the limit.
devs = Developer.all
last_devs = Developer.limit(3).offset(9).find(devs.map(&:id).sort)
assert_equal 2, last_devs.size
assert_equal "fixture_10", last_devs[0].name
assert_equal "Jamis", last_devs[1].name
end
def test_find_with_large_number
assert_queries_count(0) do
assert_raises(ActiveRecord::RecordNotFound) { Topic.find("9999999999999999999999999999999") }
end
end
def test_find_by_with_large_number
assert_queries_count(0) do
assert_nil Topic.find_by(id: "9999999999999999999999999999999")
end
end
def test_find_by_id_with_large_number
assert_queries_count(0) do
assert_nil Topic.find_by_id("9999999999999999999999999999999")
end
end
def test_find_on_relation_with_large_number
assert_raises(ActiveRecord::RecordNotFound) do
Topic.where("1=1").find(9999999999999999999999999999999)
end
assert_equal topics(:first), Topic.where(id: [1, 9999999999999999999999999999999]).find(1)
end
def test_find_by_on_relation_with_large_number
assert_nil Topic.where("1=1").find_by(id: 9999999999999999999999999999999)
assert_equal topics(:first), Topic.where(id: [1, 9999999999999999999999999999999]).find_by(id: 1)
end
def test_find_by_bang_on_relation_with_large_number
assert_raises(ActiveRecord::RecordNotFound) do
Topic.where("1=1").find_by!(id: 9999999999999999999999999999999)
end
assert_equal topics(:first), Topic.where(id: [1, 9999999999999999999999999999999]).find_by!(id: 1)
end
def test_find_an_empty_array
empty_array = []
result = Topic.find(empty_array)
assert_equal [], result
assert_not_same empty_array, result
end
def test_find_doesnt_have_implicit_ordering
assert_queries_match(/^((?!ORDER).)*$/) { Topic.find(1) }
end
def test_find_by_ids_missing_one
assert_raise(ActiveRecord::RecordNotFound) { Topic.find(1, 2, 45) }
end
def test_find_with_group_and_sanitized_having_method
developers = Developer.group(:salary).having("sum(salary) > ?", 10000).select("salary").to_a
assert_equal 3, developers.size
assert_equal 3, developers.map(&:salary).uniq.size
assert developers.all? { |developer| developer.salary > 10000 }
end
def test_find_with_entire_select_statement
topics = Topic.find_by_sql "SELECT * FROM topics WHERE author_name = 'Mary'"
assert_equal(1, topics.size)
assert_equal(topics(:second).title, topics.first.title)
assert_async_equal topics, Topic.async_find_by_sql("SELECT * FROM topics WHERE author_name = 'Mary'")
end
def test_find_with_prepared_select_statement
topics = Topic.find_by_sql ["SELECT * FROM topics WHERE author_name = ?", "Mary"]
assert_equal(1, topics.size)
assert_equal(topics(:second).title, topics.first.title)
end
def test_find_by_sql_with_sti_on_joined_table
accounts = Account.find_by_sql("SELECT * FROM accounts INNER JOIN companies ON companies.id = accounts.firm_id")
assert_equal [Account], accounts.collect(&:class).uniq
end
def test_find_by_association_subquery
firm = companies(:first_firm)
assert_equal firm.account, Account.find_by(firm: Firm.where(id: firm))
assert_equal firm.account, Account.find_by(firm_id: Firm.where(id: firm))
end
def test_find_by_and_where_consistency_with_active_record_instance
firm = companies(:first_firm)
assert_equal Account.where(firm_id: firm).take, Account.find_by(firm_id: firm)
end
def test_find_by_with_alias
account = accounts(:last_account)
assert_equal account, Account.find_by(available_credit: account.available_credit)
end
def test_take
assert_equal topics(:first), Topic.where("title = 'The First Topic'").take
end
def test_take_failing
assert_nil Topic.where("title = 'This title does not exist'").take
end
def test_take_bang_present
assert_nothing_raised do
assert_equal topics(:second), Topic.where("title = 'The Second Topic of the day'").take!
end
end
def test_take_bang_missing
assert_raises ActiveRecord::RecordNotFound, match: "Couldn't find Topic" do
Topic.where("title = 'This title does not exist'").take!
end
end
def test_sole
assert_equal topics(:first), Topic.where("title = 'The First Topic'").sole
assert_equal topics(:first), Topic.find_sole_by("title = 'The First Topic'")
end
def test_sole_failing_none
assert_raises ActiveRecord::RecordNotFound, match: "Couldn't find Topic" do
Topic.where("title = 'This title does not exist'").sole
end
assert_raises ActiveRecord::RecordNotFound, match: "Couldn't find Topic" do
Topic.find_sole_by("title = 'This title does not exist'")
end
end
def test_sole_failing_many
assert_raises ActiveRecord::SoleRecordExceeded, match: "Wanted only one Topic" do
Topic.where("author_name = 'Carl'").sole
end
assert_raises ActiveRecord::SoleRecordExceeded, match: "Wanted only one Topic" do
Topic.find_sole_by("author_name = 'Carl'")
end
end
def test_sole_record_exceeded_record_accessor
relation = Topic.where("author_name = 'Carl'")
error = assert_raises ActiveRecord::SoleRecordExceeded, match: "Wanted only one Topic" do
relation.sole
end
assert_kind_of ActiveRecord::Relation, error.record
assert_equal relation.count, error.record.count
end
def test_sole_on_loaded_relation
relation = Topic.where("title = 'The First Topic'").load
expected_topic = topics(:first)
assert_no_queries do
assert_equal expected_topic, relation.sole
end
end
def test_first
assert_equal topics(:second).title, Topic.where("title = 'The Second Topic of the day'").first.title
end
def test_first_failing
assert_nil Topic.where("title = 'The Second Topic of the day!'").first
end
def test_first_bang_present
assert_nothing_raised do
assert_equal topics(:second), Topic.where("title = 'The Second Topic of the day'").first!
end
end
def test_first_bang_missing
assert_raises ActiveRecord::RecordNotFound, match: "Couldn't find Topic" do
Topic.where("title = 'This title does not exist'").first!
end
end
def test_first_have_primary_key_order_by_default
expected = topics(:first)
expected.touch # PostgreSQL changes the default order if no order clause is used
assert_equal expected, Topic.first
assert_equal expected, Topic.limit(5).first
assert_equal expected, Topic.order(nil).first
end
def test_model_class_responds_to_first_bang
assert Topic.first!
Topic.delete_all
assert_raises ActiveRecord::RecordNotFound, match: "Couldn't find Topic" do
Topic.first!
end
end
def test_second
assert_equal topics(:second).title, Topic.second.title
end
def test_second_with_offset
assert_equal topics(:fifth), Topic.offset(3).second
end
def test_second_have_primary_key_order_by_default
expected = topics(:second)
expected.touch # PostgreSQL changes the default order if no order clause is used
assert_equal expected, Topic.second
assert_equal expected, Topic.limit(5).second
assert_equal expected, Topic.order(nil).second
end
def test_model_class_responds_to_second_bang
assert Topic.second!
Topic.delete_all
assert_raises ActiveRecord::RecordNotFound, match: "Couldn't find Topic" do
Topic.second!
end
end
def test_third
assert_equal topics(:third).title, Topic.third.title
end
def test_third_with_offset
assert_equal topics(:fifth), Topic.offset(2).third
end
def test_third_have_primary_key_order_by_default
expected = topics(:third)
expected.touch # PostgreSQL changes the default order if no order clause is used
assert_equal expected, Topic.third
assert_equal expected, Topic.limit(5).third
assert_equal expected, Topic.order(nil).third
end
def test_model_class_responds_to_third_bang
assert Topic.third!
Topic.delete_all
assert_raises ActiveRecord::RecordNotFound, match: "Couldn't find Topic" do
Topic.third!
end
end
def test_fourth
assert_equal topics(:fourth).title, Topic.fourth.title
end
def test_fourth_with_offset
assert_equal topics(:fifth), Topic.offset(1).fourth
end
def test_fourth_have_primary_key_order_by_default
expected = topics(:fourth)
expected.touch # PostgreSQL changes the default order if no order clause is used
assert_equal expected, Topic.fourth
assert_equal expected, Topic.limit(5).fourth
assert_equal expected, Topic.order(nil).fourth
end
def test_model_class_responds_to_fourth_bang
assert Topic.fourth!
Topic.delete_all
assert_raises ActiveRecord::RecordNotFound, match: "Couldn't find Topic" do
Topic.fourth!
end
end
def test_fifth
assert_equal topics(:fifth).title, Topic.fifth.title
end
def test_fifth_with_offset
assert_equal topics(:fifth), Topic.offset(0).fifth
end
def test_fifth_have_primary_key_order_by_default
expected = topics(:fifth)
expected.touch # PostgreSQL changes the default order if no order clause is used
assert_equal expected, Topic.fifth
assert_equal expected, Topic.limit(5).fifth
assert_equal expected, Topic.order(nil).fifth
end
def test_model_class_responds_to_fifth_bang
assert Topic.fifth!
Topic.delete_all
assert_raises ActiveRecord::RecordNotFound, match: "Couldn't find Topic" do
Topic.fifth!
end
end
def test_second_to_last
assert_equal topics(:fourth).title, Topic.second_to_last.title
# test with offset
assert_equal topics(:fourth), Topic.offset(1).second_to_last
assert_equal topics(:fourth), Topic.offset(2).second_to_last
assert_equal topics(:fourth), Topic.offset(3).second_to_last
assert_nil Topic.offset(4).second_to_last
assert_nil Topic.offset(5).second_to_last
# test with limit
assert_nil Topic.limit(1).second
assert_nil Topic.limit(1).second_to_last
end
def test_second_to_last_have_primary_key_order_by_default
expected = topics(:fourth)
expected.touch # PostgreSQL changes the default order if no order clause is used
assert_equal expected, Topic.second_to_last
end
def test_model_class_responds_to_second_to_last_bang
assert Topic.second_to_last!
Topic.delete_all
assert_raises ActiveRecord::RecordNotFound, match: "Couldn't find Topic" do
Topic.second_to_last!
end
end
def test_third_to_last
assert_equal topics(:third).title, Topic.third_to_last.title
# test with offset
assert_equal topics(:third), Topic.offset(1).third_to_last
assert_equal topics(:third), Topic.offset(2).third_to_last
assert_nil Topic.offset(3).third_to_last
assert_nil Topic.offset(4).third_to_last
assert_nil Topic.offset(5).third_to_last
# test with limit
assert_nil Topic.limit(1).third
assert_nil Topic.limit(1).third_to_last
assert_nil Topic.limit(2).third
assert_nil Topic.limit(2).third_to_last
end
def test_third_to_last_have_primary_key_order_by_default
expected = topics(:third)
expected.touch # PostgreSQL changes the default order if no order clause is used
assert_equal expected, Topic.third_to_last
end
def test_model_class_responds_to_third_to_last_bang
assert Topic.third_to_last!
Topic.delete_all
assert_raises ActiveRecord::RecordNotFound, match: "Couldn't find Topic" do
Topic.third_to_last!
end
end
def test_nth_to_last_with_order_uses_limit
assert_queries_match(/ORDER BY #{Regexp.escape(quote_table_name("topics.id"))} DESC LIMIT/i) do
Topic.second_to_last
end
assert_queries_match(/ORDER BY #{Regexp.escape(quote_table_name("topics.updated_at"))} DESC LIMIT/i) do
Topic.order(:updated_at).second_to_last
end
end
def test_last_bang_present
assert_nothing_raised do
assert_equal topics(:second), Topic.where("title = 'The Second Topic of the day'").last!
end
end
def test_last_bang_missing
assert_raises ActiveRecord::RecordNotFound, match: "Couldn't find Topic" do
Topic.where("title = 'This title does not exist'").last!
end
end
def test_model_class_responds_to_last_bang
assert_equal topics(:fifth), Topic.last!
assert_raises ActiveRecord::RecordNotFound, match: "Couldn't find Topic" do
Topic.delete_all
Topic.last!
end
end
def test_take_and_first_and_last_with_integer_should_use_sql_limit
assert_queries_match(/LIMIT|ROWNUM <=|FETCH FIRST/) { Topic.take(3).entries }
assert_queries_match(/LIMIT|ROWNUM <=|FETCH FIRST/) { Topic.first(2).entries }
assert_queries_match(/LIMIT|ROWNUM <=|FETCH FIRST/) { Topic.last(5).entries }
end
def test_last_with_integer_and_order_should_keep_the_order
assert_equal Topic.order("title").to_a.last(2), Topic.order("title").last(2)
end
def test_last_with_integer_and_order_should_use_sql_limit
relation = Topic.order("title")
assert_queries_count(1) { relation.last(5) }
assert_not_predicate relation, :loaded?
end
def test_last_with_integer_and_reorder_should_use_sql_limit
relation = Topic.reorder("title")
assert_queries_count(1) { relation.last(5) }
assert_not_predicate relation, :loaded?
end
def test_last_on_loaded_relation_should_not_use_sql
relation = Topic.limit(10).load
assert_no_queries do
relation.last
relation.last(2)
end
end
def test_first_without_order_columns
assert_nil Edge.primary_key
assert_nil Edge.implicit_order_column
assert_nil Edge.query_constraints_list
error = assert_raises(ActiveRecord::MissingRequiredOrderError) do
Edge.all.first
end
assert_match(/Relation has no order values/, error.message)
end
# TODO: Remove this test when we remove the deprecated `raise_on_missing_required_finder_order_columns`
def test_first_without_order_columns_and_raise_on_missing_required_finder_order_columns_disabled
raise_on_missing_required_finder_order_columns_before = ActiveRecord.raise_on_missing_required_finder_order_columns
ActiveRecord.raise_on_missing_required_finder_order_columns = false
assert_nil Edge.primary_key
assert_nil Edge.implicit_order_column
assert_nil Edge.query_constraints_list
assert_nothing_raised do
assert_deprecated(/Calling order dependent finder methods/, ActiveRecord.deprecator) do
Edge.all.first
end
end
ensure
ActiveRecord.raise_on_missing_required_finder_order_columns = raise_on_missing_required_finder_order_columns_before
end
def test_first_with_at_least_primary_key
ordered_edge = Class.new(Edge) do
self.primary_key = "source_id"
end
assert_nothing_raised do
ordered_edge.all.first
end
end
def test_first_with_at_least_implicit_order_column
ordered_edge = Class.new(Edge) do
self.implicit_order_column = "source_id"
end
assert_nothing_raised do
ordered_edge.all.first
end
end
def test_first_with_at_least_query_constraints
ordered_edge = Class.new(Edge) do
query_constraints "source_id"
end
assert_nothing_raised do
ordered_edge.all.first
end
end
def test_last_without_order_columns
assert_nil Edge.primary_key
assert_nil Edge.implicit_order_column
assert_nil Edge.query_constraints_list
error = assert_raises(ActiveRecord::MissingRequiredOrderError) do
Edge.all.last
end
assert_match(/Relation has no order values/, error.message)
end
# TODO: Remove this test when we remove `raise_on_missing_required_finder_order_columns`
def test_last_without_order_columns_and_raise_on_missing_required_finder_order_columns_disabled
raise_on_missing_required_finder_order_columns_before = ActiveRecord.raise_on_missing_required_finder_order_columns
ActiveRecord.raise_on_missing_required_finder_order_columns = false
assert_nil Edge.primary_key
assert_nil Edge.implicit_order_column
assert_nil Edge.query_constraints_list
error = assert_raises(ActiveRecord::IrreversibleOrderError) do
assert_deprecated(/Calling order dependent finder methods/, ActiveRecord.deprecator) do
Edge.all.last
end
end
assert_match(/Relation has no order values/, error.message)
ensure
ActiveRecord.raise_on_missing_required_finder_order_columns = raise_on_missing_required_finder_order_columns_before
end
def test_last_with_at_least_primary_key
ordered_edge = Class.new(Edge) do
self.primary_key = "source_id"
end
assert_nothing_raised do
ordered_edge.all.last
end
end
def test_last_with_at_least_implicit_order_column
ordered_edge = Class.new(Edge) do
self.implicit_order_column = "source_id"
end
assert_nothing_raised do
ordered_edge.all.last
end
end
def test_last_with_at_least_query_constraints
ordered_edge = Class.new(Edge) do
query_constraints "source_id"
end
assert_nothing_raised do
ordered_edge.all.last
end
end
def test_last_with_irreversible_order_value
error = assert_raises(ActiveRecord::IrreversibleOrderError) do
Topic.order(Arel.sql("coalesce(author_name, title)")).last
end
assert_match(/Order .* cannot be reversed automatically/, error.message)
end
def test_last_on_relation_with_limit_and_offset
post = posts("sti_comments")
comments = post.comments.order(id: :asc)
assert_equal comments.limit(2).to_a.last, comments.limit(2).last
assert_equal comments.limit(2).to_a.last(2), comments.limit(2).last(2)
assert_equal comments.limit(2).to_a.last(3), comments.limit(2).last(3)
assert_equal comments.offset(2).to_a.last, comments.offset(2).last
assert_equal comments.offset(2).to_a.last(2), comments.offset(2).last(2)
assert_equal comments.offset(2).to_a.last(3), comments.offset(2).last(3)
comments = comments.offset(1)
assert_equal comments.limit(2).to_a.last, comments.limit(2).last
assert_equal comments.limit(2).to_a.last(2), comments.limit(2).last(2)
assert_equal comments.limit(2).to_a.last(3), comments.limit(2).last(3)
end
def test_first_on_relation_with_limit_and_offset
post = posts("sti_comments")
comments = post.comments.order(id: :asc)
assert_equal comments.limit(2).to_a.first, comments.limit(2).first
assert_equal comments.limit(2).to_a.first(2), comments.limit(2).first(2)
assert_equal comments.limit(2).to_a.first(3), comments.limit(2).first(3)
assert_equal comments.offset(2).to_a.first, comments.offset(2).first
assert_equal comments.offset(2).to_a.first(2), comments.offset(2).first(2)
assert_equal comments.offset(2).to_a.first(3), comments.offset(2).first(3)
comments = comments.offset(1)
assert_equal comments.limit(2).to_a.first, comments.limit(2).first
assert_equal comments.limit(2).to_a.first(2), comments.limit(2).first(2)
assert_equal comments.limit(2).to_a.first(3), comments.limit(2).first(3)
end
def test_first_have_determined_order_by_default
expected = [companies(:second_client), companies(:another_client)]
clients = Client.where(name: expected.map(&:name))
assert_equal expected, clients.first(2)
assert_equal expected, clients.limit(5).first(2)
assert_equal expected, clients.order(nil).first(2)
end
def test_implicit_order_column_is_configurable_with_a_single_value
old_implicit_order_column = Topic.implicit_order_column
Topic.implicit_order_column = "title"
assert_equal topics(:fifth), Topic.first
assert_equal topics(:third), Topic.last
assert_queries_match(/ORDER BY #{Regexp.escape(quote_table_name("topics.title"))} DESC, #{Regexp.escape(quote_table_name("topics.id"))} DESC LIMIT/i) {
Topic.last
}
ensure
Topic.implicit_order_column = old_implicit_order_column
end
def test_implicit_order_column_is_configurable_with_multiple_values
old_implicit_order_column = Topic.implicit_order_column
Topic.implicit_order_column = ["title", "author_name"]
assert_queries_match(/ORDER BY #{Regexp.escape(quote_table_name("topics.title"))} DESC, #{Regexp.escape(quote_table_name("topics.author_name"))} DESC, #{Regexp.escape(quote_table_name("topics.id"))} DESC LIMIT/i) {
Topic.last
}
ensure
Topic.implicit_order_column = old_implicit_order_column
end
def test_ordering_does_not_append_primary_keys_or_query_constraints_if_passed_an_implicit_order_column_array_ending_in_nil
old_implicit_order_column = Topic.implicit_order_column
Topic.implicit_order_column = ["author_name", nil]
assert_queries_match(/ORDER BY #{Regexp.escape(quote_table_name("topics.author_name"))} DESC LIMIT/i) {
Topic.last
}
ensure
Topic.implicit_order_column = old_implicit_order_column
end
def test_implicit_order_set_to_primary_key
old_implicit_order_column = Topic.implicit_order_column
Topic.implicit_order_column = "id"
assert_queries_match(/ORDER BY #{Regexp.escape(quote_table_name("topics.id"))} DESC LIMIT/i) {
Topic.last
}
ensure
Topic.implicit_order_column = old_implicit_order_column
end
def test_implicit_order_for_model_without_primary_key
old_implicit_order_column = NonPrimaryKey.implicit_order_column
NonPrimaryKey.implicit_order_column = "created_at"
assert_queries_match(/ORDER BY #{Regexp.escape(quote_table_name("non_primary_keys.created_at"))} DESC LIMIT/i) {
NonPrimaryKey.last
}
ensure
NonPrimaryKey.implicit_order_column = old_implicit_order_column
end
def test_implicit_order_column_reorders_query_constraints
ClothingItem.implicit_order_column = "color"
quoted_type = Regexp.escape(quote_table_name("clothing_items.clothing_type"))
quoted_color = Regexp.escape(quote_table_name("clothing_items.color"))
assert_queries_match(/ORDER BY #{quoted_color} ASC, #{quoted_type} ASC LIMIT/i) do
assert_kind_of ClothingItem, ClothingItem.first
end
ensure
ClothingItem.implicit_order_column = nil
end
def test_implicit_order_column_prepends_query_constraints
ClothingItem.implicit_order_column = "description"
quoted_type = Regexp.escape(quote_table_name("clothing_items.clothing_type"))
quoted_color = Regexp.escape(quote_table_name("clothing_items.color"))
quoted_description = Regexp.escape(quote_table_name("clothing_items.description"))
assert_queries_match(/ORDER BY #{quoted_description} ASC, #{quoted_type} ASC, #{quoted_color} ASC LIMIT/i) do
assert_kind_of ClothingItem, ClothingItem.first
end
ensure
ClothingItem.implicit_order_column = nil
end
def test_take_and_first_and_last_with_integer_should_return_an_array
assert_kind_of Array, Topic.take(5)
assert_kind_of Array, Topic.first(5)
assert_kind_of Array, Topic.last(5)
end
def test_unexisting_record_exception_handling
assert_raise(ActiveRecord::RecordNotFound) {
Topic.find(1).parent
}
Topic.find(2).topic
end
def test_find_only_some_columns
topic = Topic.select("author_name").find(1)
assert_raise(ActiveModel::MissingAttributeError) { topic.title }
assert_raise(ActiveModel::MissingAttributeError) { topic.title? }
assert_nil topic.read_attribute("title")
assert_equal "David", topic.author_name
assert_not topic.attribute_present?("title")
assert_not topic.attribute_present?(:title)
assert topic.attribute_present?("author_name")
assert_respond_to topic, "author_name"
end
def test_find_on_array_conditions
assert Topic.where(["approved = ?", false]).find(1)
assert_raise(ActiveRecord::RecordNotFound) { Topic.where(["approved = ?", true]).find(1) }
end
def test_find_on_hash_conditions
assert Topic.where(approved: false).find(1)
assert_raise(ActiveRecord::RecordNotFound) { Topic.where(approved: true).find(1) }
end
def test_find_on_hash_conditions_with_qualified_attribute_dot_notation_string
assert Topic.where("topics.approved" => false).find(1)
assert_raise(ActiveRecord::RecordNotFound) { Topic.where("topics.approved" => true).find(1) }
end
def test_find_on_hash_conditions_with_qualified_attribute_dot_notation_symbol
assert Topic.where('topics.approved': false).find(1)
assert_raise(ActiveRecord::RecordNotFound) { Topic.where('topics.approved': true).find(1) }
end
def test_find_on_hash_conditions_with_hashed_table_name
assert Topic.where(topics: { approved: false }).find(1)
assert_raise(ActiveRecord::RecordNotFound) { Topic.where(topics: { approved: true }).find(1) }
end
def test_find_on_combined_explicit_and_hashed_table_names
assert Topic.where("topics.approved" => false, topics: { author_name: "David" }).find(1)
assert_raise(ActiveRecord::RecordNotFound) { Topic.where("topics.approved" => true, topics: { author_name: "David" }).find(1) }
assert_raise(ActiveRecord::RecordNotFound) { Topic.where("topics.approved" => false, topics: { author_name: "Melanie" }).find(1) }
end
def test_find_with_hash_conditions_on_joined_table
firms = Firm.joins(:account).where(accounts: { credit_limit: 50 })
assert_equal 1, firms.size
assert_equal companies(:first_firm), firms.first
end
def test_find_with_hash_conditions_on_joined_table_and_with_range
firms = DependentFirm.joins(:account).where(name: "RailsCore", accounts: { credit_limit: 55..60 })
assert_equal 1, firms.size
assert_equal companies(:rails_core), firms.first
end
def test_find_on_hash_conditions_with_explicit_table_name_and_aggregate
david = customers(:david)
assert Customer.where("customers.name" => david.name, :address => david.address).find(david.id)
assert_raise(ActiveRecord::RecordNotFound) {
Customer.where("customers.name" => david.name + "1", :address => david.address).find(david.id)
}
end
def test_find_on_association_proxy_conditions
assert_equal [1, 2, 3, 5, 6, 7, 8, 9, 10, 12, 13], Comment.where(post_id: authors(:david).posts).map(&:id).sort
end
def test_find_on_hash_conditions_with_range
assert_equal [1, 2], Topic.where(id: 1..2).to_a.map(&:id).sort
assert_raise(ActiveRecord::RecordNotFound) { Topic.where(id: 2..3).find(1) }
end
def test_find_on_hash_conditions_with_end_exclusive_range
assert_equal [1, 2, 3], Topic.where(id: 1..3).to_a.map(&:id).sort
assert_equal [1, 2], Topic.where(id: 1...3).to_a.map(&:id).sort
assert_raise(ActiveRecord::RecordNotFound) { Topic.where(id: 2...3).find(3) }
end
def test_find_on_hash_conditions_with_multiple_ranges
assert_equal [1, 2, 3], Comment.where(id: 1..3, post_id: 1..2).to_a.map(&:id).sort
assert_equal [1], Comment.where(id: 1..1, post_id: 1..10).to_a.map(&:id).sort
end
def test_find_on_hash_conditions_with_array_of_integers_and_ranges
assert_equal [1, 2, 3, 5, 6, 7, 8, 9], Comment.where(id: [1..2, 3, 5, 6..8, 9]).to_a.map(&:id).sort
end
def test_find_on_hash_conditions_with_array_of_ranges
assert_equal [1, 2, 6, 7, 8], Comment.where(id: [1..2, 6..8]).to_a.map(&:id).sort
end
def test_find_on_hash_conditions_with_open_ended_range
assert_equal [1, 2, 3], Comment.where(id: Float::INFINITY..3).to_a.map(&:id).sort
end
def test_find_on_hash_conditions_with_numeric_range_for_string
topic = Topic.create!(title: "12 Factor App")
assert_equal [topic], Topic.where(title: 10..2).to_a
end
def test_find_on_multiple_hash_conditions
assert Topic.where(author_name: "David", title: "The First Topic", replies_count: 1, approved: false).find(1)
assert_raise(ActiveRecord::RecordNotFound) { Topic.where(author_name: "David", title: "The First Topic", replies_count: 1, approved: true).find(1) }
assert_raise(ActiveRecord::RecordNotFound) { Topic.where(author_name: "David", title: "HHC", replies_count: 1, approved: false).find(1) }
end
def test_condition_interpolation
assert_kind_of Firm, Company.where("name = '%s'", "37signals").first
assert_nil Company.where(["name = '%s'", "37signals!"]).first
assert_nil Company.where(["name = '%s'", "37signals!' OR 1=1"]).first
assert_kind_of Time, Topic.where(["id = %d", 1]).first.written_on
end
def test_condition_array_interpolation
assert_kind_of Firm, Company.where(["name = '%s'", "37signals"]).first
assert_nil Company.where(["name = '%s'", "37signals!"]).first
assert_nil Company.where(["name = '%s'", "37signals!' OR 1=1"]).first
assert_kind_of Time, Topic.where(["id = %d", 1]).first.written_on
end
def test_condition_hash_interpolation
assert_kind_of Firm, Company.where(name: "37signals").first
assert_nil Company.where(name: "37signals!").first
assert_kind_of Time, Topic.where(id: 1).first.written_on
end
def test_hash_condition_find_malformed
assert_raise(ActiveRecord::StatementInvalid) {
Company.where(id: 2, dhh: true).first
}
end
def test_hash_condition_find_with_escaped_characters
Company.create("name" => "Ain't noth'n like' \#stuff")
assert Company.where(name: "Ain't noth'n like' \#stuff").first
end
def test_hash_condition_find_with_array
p1, p2 = Post.limit(2).order("id asc").to_a
assert_equal [p1, p2], Post.where(id: [p1, p2]).order("id asc").to_a
assert_equal [p1, p2], Post.where(id: [p1, p2.id]).order("id asc").to_a
end
def test_hash_condition_find_with_nil
topic = Topic.where(last_read: nil).first
assert_not_nil topic
assert_nil topic.last_read
end
def test_hash_condition_find_with_aggregate_having_one_mapping
balance = customers(:david).balance
assert_kind_of Money, balance
found_customer = Customer.where(balance: balance).first
assert_equal customers(:david), found_customer
end
def test_hash_condition_find_with_aggregate_having_three_mappings_array
david_address = customers(:david).address
zaphod_address = customers(:zaphod).address
barney_address = customers(:barney).address
assert_kind_of Address, david_address
assert_kind_of Address, zaphod_address
found_customers = Customer.where(address: [david_address, zaphod_address, barney_address])
assert_equal [customers(:david), customers(:zaphod), customers(:barney)], found_customers.sort_by(&:id)
end
def test_hash_condition_find_with_aggregate_having_one_mapping_array
david_balance = customers(:david).balance
zaphod_balance = customers(:zaphod).balance
assert_kind_of Money, david_balance
assert_kind_of Money, zaphod_balance
found_customers = Customer.where(balance: [david_balance, zaphod_balance])
assert_equal [customers(:david), customers(:zaphod)], found_customers.sort_by(&:id)
assert_equal Customer.where(balance: [david_balance.amount, zaphod_balance.amount]).to_sql, found_customers.to_sql
end
def test_hash_condition_find_with_aggregate_attribute_having_same_name_as_field_and_key_value_being_aggregate
gps_location = customers(:david).gps_location
assert_kind_of GpsLocation, gps_location
found_customer = Customer.where(gps_location: gps_location).first
assert_equal customers(:david), found_customer
end
def test_hash_condition_find_with_aggregate_having_one_mapping_and_key_value_being_attribute_value
balance = customers(:david).balance
assert_kind_of Money, balance
found_customer = Customer.where(balance: balance.amount).first
assert_equal customers(:david), found_customer
end
def test_hash_condition_find_with_aggregate_attribute_having_same_name_as_field_and_key_value_being_attribute_value
gps_location = customers(:david).gps_location
assert_kind_of GpsLocation, gps_location
found_customer = Customer.where(gps_location: gps_location.gps_location).first
assert_equal customers(:david), found_customer
end
def test_hash_condition_find_with_aggregate_having_three_mappings
address = customers(:david).address
assert_kind_of Address, address
customers = Customer.where(address: address).order(:id)
assert_equal [customers(:david)], customers
assert_equal customers(:david, :mary), customers.unscope(where: [:address_city, :address_country])
end
def test_hash_condition_find_with_one_condition_being_aggregate_and_another_not
address = customers(:david).address
assert_kind_of Address, address
found_customer = Customer.where(address: address, name: customers(:david).name).first
assert_equal customers(:david), found_customer
end
def test_hash_condition_find_nil_with_aggregate_having_one_mapping
assert_nil customers(:zaphod).gps_location
found_customer = Customer.where(gps_location: nil, name: customers(:zaphod).name).first
assert_equal customers(:zaphod), found_customer
end
def test_hash_condition_find_nil_with_aggregate_having_multiple_mappings
customers(:david).update(address: nil)
assert_nil customers(:david).address_street
assert_nil customers(:david).address_city
found_customer = Customer.where(address: nil, name: customers(:david).name).first
assert_equal customers(:david), found_customer
end
def test_hash_condition_find_empty_array_with_aggregate_having_multiple_mappings
assert_nil Customer.where(address: []).first
end
def test_condition_utc_time_interpolation_with_default_timezone_local
with_env_tz "America/New_York" do
with_timezone_config default: :local do
topic = Topic.first
assert_equal topic, Topic.where(["written_on = ?", topic.written_on.getutc]).first
end
end
end
def test_hash_condition_utc_time_interpolation_with_default_timezone_local
with_env_tz "America/New_York" do
with_timezone_config default: :local do
topic = Topic.first
assert_equal topic, Topic.where(written_on: topic.written_on.getutc).first
end
end
end
def test_condition_local_time_interpolation_with_default_timezone_utc
with_env_tz "America/New_York" do
with_timezone_config default: :utc do
topic = Topic.first
assert_equal topic, Topic.where(["written_on = ?", topic.written_on.getlocal]).first
end
end
end
def test_hash_condition_local_time_interpolation_with_default_timezone_utc
with_env_tz "America/New_York" do
with_timezone_config default: :utc do
topic = Topic.first
assert_equal topic, Topic.where(written_on: topic.written_on.getlocal).first
end
end
end
def test_bind_variables
assert_kind_of Firm, Company.where(["name = ?", "37signals"]).first
assert_nil Company.where(["name = ?", "37signals!"]).first
assert_nil Company.where(["name = ?", "37signals!' OR 1=1"]).first
assert_kind_of Time, Topic.where(["id = ?", 1]).first.written_on
assert_raise(ActiveRecord::PreparedStatementInvalid) {
Company.where(["id=? AND name = ?", 2]).first
}
assert_raise(ActiveRecord::PreparedStatementInvalid) {
Company.where(["id=?", 2, 3, 4]).first
}
end
def test_bind_variables_with_quotes
Company.create("name" => "37signals' go'es against")
assert Company.where(["name = ?", "37signals' go'es against"]).first
end
def test_named_bind_variables_with_quotes
Company.create("name" => "37signals' go'es against")
assert Company.where(["name = :name", { name: "37signals' go'es against" }]).first
end
def test_named_bind_variables
assert_kind_of Firm, Company.where(["name = :name", { name: "37signals" }]).first
assert_nil Company.where(["name = :name", { name: "37signals!" }]).first
assert_nil Company.where(["name = :name", { name: "37signals!' OR 1=1" }]).first
assert_kind_of Time, Topic.where(["id = :id", { id: 1 }]).first.written_on
end
def test_count_by_sql
assert_equal(0, Entrant.count_by_sql("SELECT COUNT(*) FROM entrants WHERE id > 3"))
assert_equal(1, Entrant.count_by_sql(["SELECT COUNT(*) FROM entrants WHERE id > ?", 2]))
assert_equal(2, Entrant.count_by_sql(["SELECT COUNT(*) FROM entrants WHERE id > ?", 1]))
assert_async_equal 2, Entrant.async_count_by_sql(["SELECT COUNT(*) FROM entrants WHERE id > ?", 1])
end
def test_find_by_one_attribute
assert_equal topics(:first), Topic.find_by_title("The First Topic")
assert_nil Topic.find_by_title("The First Topic!")
end
def test_find_by_one_attribute_bang
assert_equal topics(:first), Topic.find_by_title!("The First Topic")
assert_raises ActiveRecord::RecordNotFound, match: "Couldn't find Topic" do
Topic.find_by_title!("The First Topic!")
end
end
def test_find_by_on_attribute_that_is_a_reserved_word
dog_alias = "Dog"
dog = Dog.create(alias: dog_alias)
assert_equal dog, Dog.find_by_alias(dog_alias)
end
def test_find_by_one_attribute_that_is_an_alias
assert_equal topics(:first), Topic.find_by_heading("The First Topic")
assert_nil Topic.find_by_heading("The First Topic!")
end
def test_find_by_one_attribute_bang_with_blank_defined
blank_topic = BlankTopic.create(title: "The Blank One")
assert_equal blank_topic, BlankTopic.find_by_title!("The Blank One")
end
def test_find_by_one_attribute_with_conditions
assert_equal accounts(:rails_core_account), Account.where("firm_id = ?", 6).find_by_credit_limit(50)
end
def test_find_by_one_attribute_that_is_an_aggregate
address = customers(:david).address
assert_kind_of Address, address
found_customer = Customer.find_by_address(address)
assert_equal customers(:david), found_customer
end
def test_find_by_one_attribute_that_is_an_aggregate_with_one_attribute_difference
address = customers(:david).address
assert_kind_of Address, address
missing_address = Address.new(address.street, address.city, address.country + "1")
assert_nil Customer.find_by_address(missing_address)
missing_address = Address.new(address.street, address.city + "1", address.country)
assert_nil Customer.find_by_address(missing_address)
missing_address = Address.new(address.street + "1", address.city, address.country)
assert_nil Customer.find_by_address(missing_address)
end
def test_find_by_two_attributes_that_are_both_aggregates
balance = customers(:david).balance
address = customers(:david).address
assert_kind_of Money, balance
assert_kind_of Address, address
found_customer = Customer.find_by_balance_and_address(balance, address)
assert_equal customers(:david), found_customer
end
def test_find_by_two_attributes_with_one_being_an_aggregate
balance = customers(:david).balance
assert_kind_of Money, balance
found_customer = Customer.find_by_balance_and_name(balance, customers(:david).name)
assert_equal customers(:david), found_customer
end
def test_dynamic_finder_on_one_attribute_with_conditions_returns_same_results_after_caching
# ensure this test can run independently of order
Account.singleton_class.remove_method :find_by_credit_limit if Account.public_methods.include?(:find_by_credit_limit)
a = Account.where("firm_id = ?", 6).find_by_credit_limit(50)
assert_equal a, Account.where("firm_id = ?", 6).find_by_credit_limit(50) # find_by_credit_limit has been cached
end
def test_find_by_one_attribute_with_several_options
assert_equal accounts(:unknown), Account.order("id DESC").where("id != ?", 3).find_by_credit_limit(50)
end
def test_find_by_one_missing_attribute
assert_raise(NoMethodError) { Topic.find_by_undertitle("The First Topic!") }
end
def test_find_by_invalid_method_syntax
assert_raise(NoMethodError) { Topic.fail_to_find_by_title("The First Topic") }
assert_raise(NoMethodError) { Topic.find_by_title?("The First Topic") }
assert_raise(NoMethodError) { Topic.fail_to_find_or_create_by_title("Nonexistent Title") }
assert_raise(NoMethodError) { Topic.find_or_create_by_title?("Nonexistent Title") }
end
def test_find_by_two_attributes
assert_equal topics(:first), Topic.find_by_title_and_author_name("The First Topic", "David")
assert_nil Topic.find_by_title_and_author_name("The First Topic", "Mary")
end
def test_find_by_two_attributes_but_passing_only_one
assert_raise(ArgumentError) { Topic.find_by_title_and_author_name("The First Topic") }
end
def test_find_by_nil_attribute
topic = Topic.find_by_last_read nil
assert_not_nil topic
assert_nil topic.last_read
end
def test_find_by_nil_and_not_nil_attributes
topic = Topic.find_by_last_read_and_author_name nil, "Mary"
assert_equal "Mary", topic.author_name
end
def test_find_with_bad_sql
assert_raise(ActiveRecord::StatementInvalid) { Topic.find_by_sql "select 1 from badtable" }
end
def test_joins_dont_clobber_id
first = Firm.
joins("INNER JOIN companies clients ON clients.firm_id = companies.id").
where("companies.id = 1").first
assert_equal 1, first.id
end
def test_joins_with_string_array
person_with_reader_and_post = Post.
joins(["INNER JOIN categorizations ON categorizations.post_id = posts.id",
"INNER JOIN categories ON categories.id = categorizations.category_id AND categories.type = 'SpecialCategory'"
])
assert_equal 1, person_with_reader_and_post.size
end
def test_find_by_id_with_conditions_with_or
assert_nothing_raised do
Post.where("posts.id <= 3 OR posts.#{ARTest::QUOTED_TYPE} = 'Post'").find([1, 2, 3])
end
end
def test_find_ignores_previously_inserted_record
Post.create!(title: "test", body: "it out")
assert_equal [], Post.where(id: nil)
end
def test_find_by_empty_ids
assert_equal [], Post.find([])
end
def test_find_by_empty_in_condition
assert_equal [], Post.where("id in (?)", [])
end
def test_find_by_records
p1, p2 = Post.limit(2).order("id asc").to_a
assert_equal [p1, p2], Post.where(["id in (?)", [p1, p2]]).order("id asc")
assert_equal [p1, p2], Post.where(["id in (?)", [p1, p2.id]]).order("id asc")
end
def test_select_value
assert_equal "37signals", Company.lease_connection.select_value("SELECT name FROM companies WHERE id = 1")
assert_nil Company.lease_connection.select_value("SELECT name FROM companies WHERE id = -1")
# make sure we didn't break count...
assert_equal 0, Company.count_by_sql("SELECT COUNT(*) FROM companies WHERE name = 'Halliburton'")
assert_equal 1, Company.count_by_sql("SELECT COUNT(*) FROM companies WHERE name = '37signals'")
end
def test_select_values
assert_equal ["1", "2", "3", "4", "5", "6", "7", "8", "9", "10", "11", "15"], Company.lease_connection.select_values("SELECT id FROM companies ORDER BY id").map!(&:to_s)
assert_equal ["37signals", "Summit", "Microsoft", "Flamboyant Software", "Ex Nihilo", "RailsCore", "Leetsoft", "Jadedpixel", "Odegy", "Ex Nihilo Part Deux", "Apex", "RVshare"], Company.lease_connection.select_values("SELECT name FROM companies ORDER BY id")
end
def test_select_rows
assert_equal(
[["1", "1", nil, "37signals"],
["2", "1", "2", "Summit"],
["3", "1", "1", "Microsoft"]],
Company.lease_connection.select_rows("SELECT id, firm_id, client_of, name FROM companies WHERE id IN (1,2,3) ORDER BY id").map { |i| i.map { |j| j.to_s unless j.nil? } })
assert_equal [["1", "37signals"], ["2", "Summit"], ["3", "Microsoft"]],
Company.lease_connection.select_rows("SELECT id, name FROM companies WHERE id IN (1,2,3) ORDER BY id").map { |i| i.map { |j| j.to_s unless j.nil? } }
end
def test_find_with_order_on_included_associations_with_construct_finder_sql_for_association_limiting_and_is_distinct
assert_equal 2, Post.includes(authors: :author_address).
where.not(author_addresses: { id: nil }).
order("author_addresses.id DESC").limit(2).to_a.size
assert_equal 3, Post.includes(author: :author_address, authors: :author_address).
where.not(author_addresses_authors: { id: nil }).
order("author_addresses_authors.id DESC").limit(3).to_a.size
end
def test_find_with_eager_loading_collection_and_ordering_by_collection_primary_key
assert_equal Post.first, Post.eager_load(comments: :ratings).
order("posts.id, ratings.id, comments.id").first
end
def test_find_with_nil_inside_set_passed_for_one_attribute
client_of = Company.
where(client_of: [2, 1, nil],
name: ["37signals", "Summit", "Microsoft"]).
order("client_of DESC").
map(&:client_of)
assert_includes client_of, nil
assert_equal [2, 1].sort, client_of.compact.sort
end
def test_find_with_nil_inside_set_passed_for_attribute
client_of = Company.
where(client_of: [nil]).
order("client_of DESC").
map(&:client_of)
assert_equal [], client_of.compact
end
def test_with_limiting_with_custom_select
posts = Post.references(:authors).merge(
includes: :author, select: 'posts.*, authors.id as "author_id"',
limit: 3, order: "posts.id"
).to_a
assert_equal 3, posts.size
assert_equal [1, 1, nil], posts.map(&:author_id)
end
def test_custom_select_takes_precedence_over_original_value
posts = Post.select("UPPER(title) AS title")
assert_equal "WELCOME TO THE WEBLOG", posts.first.title
assert_equal "WELCOME TO THE WEBLOG", posts.preload(:comments).first.title
assert_equal "WELCOME TO THE WEBLOG", posts.eager_load(:comments).first.title
end
def test_eager_load_for_no_has_many_with_limit_and_joins_for_has_many
relation = Post.eager_load(:author).joins(comments: :post)
assert_equal 5, relation.to_a.size
assert_equal 5, relation.limit(5).to_a.size
end
def test_eager_load_for_no_has_many_with_limit_and_left_joins_for_has_many
relation = Post.eager_load(:author).left_joins(comments: :post)
assert_equal 11, relation.to_a.size
assert_equal 11, relation.limit(11).to_a.size
end
def test_find_one_message_on_primary_key
e = assert_raises(ActiveRecord::RecordNotFound) do
Car.find(0)
end
assert_equal 0, e.id
assert_equal "id", e.primary_key
assert_equal "Car", e.model
assert_equal "Couldn't find Car with 'id'=0", e.message
end
def test_find_one_message_with_custom_primary_key
table_with_custom_primary_key do |model|
model.primary_key = :name
e = assert_raises(ActiveRecord::RecordNotFound) do
model.find "Hello World!"
end
assert_equal %{Couldn't find MercedesCar with 'name'="Hello World!"}, e.message
end
end
def test_find_some_message_with_custom_primary_key
table_with_custom_primary_key do |model|
model.primary_key = :name
e = assert_raises(ActiveRecord::RecordNotFound) do
model.find "Hello", "World!"
end
assert_equal %{Couldn't find all MercedesCars with 'name': ("Hello", "World!") (found 0 results, but was looking for 2).}, e.message
end
end
def test_find_without_primary_key
assert_raises(ActiveRecord::UnknownPrimaryKey) do
Matey.find(1)
end
end
def test_finder_with_offset_string
assert_nothing_raised { Topic.offset("3").to_a }
end
test "find_by with hash conditions returns the first matching record" do
assert_equal posts(:eager_other), Post.find_by(id: posts(:eager_other).id)
end
test "find_by with non-hash conditions returns the first matching record" do
assert_equal posts(:eager_other), Post.find_by("id = #{posts(:eager_other).id}")
end
test "find_by with multi-arg conditions returns the first matching record" do
assert_equal posts(:eager_other), Post.find_by("id = ?", posts(:eager_other).id)
end
test "find_by with range conditions returns the first matching record" do
assert_equal posts(:eager_other), Post.find_by(id: posts(:eager_other).id...posts(:misc_by_bob).id)
end
test "find_by returns nil if the record is missing" do
assert_nil Post.find_by("1 = 0")
end
test "find_by with associations" do
assert_equal authors(:david), Post.find_by(author: authors(:david)).author
assert_equal authors(:mary), Post.find_by(author: authors(:mary)).author
end
test "find_by doesn't have implicit ordering" do
assert_queries_match(/^((?!ORDER).)*$/) { Post.find_by(id: posts(:eager_other).id) }
end
test "find_by! with hash conditions returns the first matching record" do
assert_equal posts(:eager_other), Post.find_by!(id: posts(:eager_other).id)
end
test "find_by! with non-hash conditions returns the first matching record" do
assert_equal posts(:eager_other), Post.find_by!("id = #{posts(:eager_other).id}")
end
test "find_by! with multi-arg conditions returns the first matching record" do
assert_equal posts(:eager_other), Post.find_by!("id = ?", posts(:eager_other).id)
end
test "find_by! doesn't have implicit ordering" do
assert_queries_match(/^((?!ORDER).)*$/) { Post.find_by!(id: posts(:eager_other).id) }
end
test "find_by! raises RecordNotFound if the record is missing" do
error = assert_raises(ActiveRecord::RecordNotFound) do
Post.find_by!("1 = 0")
end
assert_equal "Couldn't find Post with [WHERE (1 = 0)]", error.message
end
test "find on a scope does not perform statement caching" do
honda = cars(:honda)
zyke = cars(:zyke)
tire = honda.tires.create!
tire2 = zyke.tires.create!
assert_equal tire, honda.tires.custom_find(tire.id)
assert_equal tire2, zyke.tires.custom_find(tire2.id)
end
test "find_by on a scope does not perform statement caching" do
honda = cars(:honda)
zyke = cars(:zyke)
tire = honda.tires.create!
tire2 = zyke.tires.create!
assert_equal tire, honda.tires.custom_find_by(id: tire.id)
assert_equal tire2, zyke.tires.custom_find_by(id: tire2.id)
end
test "#skip_query_cache! for #exists?" do
Topic.cache do
assert_queries_count(1) do
Topic.exists?
Topic.exists?
end
assert_queries_count(2) do
Topic.all.skip_query_cache!.exists?
Topic.all.skip_query_cache!.exists?
end
end
end
test "#skip_query_cache! for #exists? with a limited eager load" do
Topic.cache do
assert_queries_count(1) do
Topic.eager_load(:replies).limit(1).exists?
Topic.eager_load(:replies).limit(1).exists?
end
assert_queries_count(2) do
Topic.eager_load(:replies).limit(1).skip_query_cache!.exists?
Topic.eager_load(:replies).limit(1).skip_query_cache!.exists?
end
end
end
test "#last for a model with composite query constraints" do
quoted_type = Regexp.escape(quote_table_name("clothing_items.clothing_type"))
quoted_color = Regexp.escape(quote_table_name("clothing_items.color"))
assert_queries_match(/ORDER BY #{quoted_type} DESC, #{quoted_color} DESC LIMIT/i) do
assert_kind_of ClothingItem, ClothingItem.last
end
end
test "#first for a model with composite query constraints" do
quoted_type = Regexp.escape(quote_table_name("clothing_items.clothing_type"))
quoted_color = Regexp.escape(quote_table_name("clothing_items.color"))
assert_queries_match(/ORDER BY #{quoted_type} ASC, #{quoted_color} ASC LIMIT/i) do
assert_kind_of ClothingItem, ClothingItem.first
end
end
test "#find with a single composite primary key" do
book = cpk_books(:cpk_great_author_first_book)
assert_equal book, Cpk::Book.find(book.id)
end
test "find with a single composite primary key wrapped in an array" do
book = cpk_books(:cpk_great_author_first_book)
assert_equal [book], Cpk::Book.find([book.id])
end
test "find with a multiple sets of composite primary key" do
books = [cpk_books(:cpk_great_author_first_book), cpk_books(:cpk_great_author_second_book)]
ids = books.map(&:id)
result = Cpk::Book.find(*ids)
assert_equal ids, result.map(&:id)
end
test "find with a multiple sets of composite primary key wrapped in an array" do
books = [cpk_books(:cpk_great_author_first_book), cpk_books(:cpk_great_author_second_book)]
assert_equal books.map(&:id), Cpk::Book.where(revision: 1).find(books.map(&:id)).map(&:id)
end
test "find with a multiple sets of composite primary key wrapped in an array ordered" do
books = [cpk_books(:cpk_great_author_first_book), cpk_books(:cpk_great_author_second_book)]
assert_equal books.map(&:id), Cpk::Book.order(author_id: :asc).find(books.map(&:id)).map(&:id)
end
test "#find_by with composite primary key" do
book = cpk_books(:cpk_book_with_generated_pk)
assert_equal cpk_reviews(:first_book_review), Cpk::Review.find_by(book: book)
end
test "#find_by with composite primary key and query caching" do
book = cpk_books(:cpk_book_with_generated_pk)
Cpk::Review.cache do
assert_queries_count(1) do
Cpk::Review.find_by(book: book)
Cpk::Review.find_by(book: book)
end
end
end
private
def table_with_custom_primary_key
yield(Class.new(Toy) do
def self.name
"MercedesCar"
end
end)
end
end | ruby | github | https://github.com/rails/rails | activerecord/test/cases/finder_test.rb |
{% for widget in widget.subwidgets -%}{% include widget.template_name %}{%- endfor %} | html | github | https://github.com/django/django | django/forms/jinja2/django/forms/widgets/multiwidget.html |
import random
import spriteobj
# By Willi Kappler <grandor@gmx.de>
# Licensed under GPL
class Firedevil(spriteobj.SpriteObj):
"Class for the fire devil: falls, removes ice-blocks"
def __init__(self, screen, level, gfx, x, y):
spriteobj.SpriteObj.__init__(self, screen, level, gfx, x, y)
self.movingLeftAnim = [(0, 80), (1, 80), (2, 80), (3, 80)]
self.movingRightAnim = [(5, 80), (6, 80), (7, 80), (8, 80)]
self.turningLeftAnim = [(4, 40), (4, 40)]
self.turningRightAnim = [(4, 40), (4, 40)]
self.sleepingAnim = [(4, 40), (4, 40)]
self.move = self.move4
self.moveLeft()
self.sleep = self.sleep1
self.sleepMax = 6000
self.name = "Firedevil"
def check(self):
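# When not moving vertically: fall if there is no ground below; otherwise melt ice tiles
# (front tile value 1) below, above and ahead by clearing frontData and redrawing the
# background tile, turning around at solid walls. While falling, delegate to checkFalling().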
if self.verticalMovement == 0:
bt = self.bottomTile()
if bt == 0:
self.stop()
self.fall()
elif bt == 1:
self.level.frontData[self.bottomSpotOut][self.midX] = 0
self.screen.blit(self.level.backGfx[self.level.backData[self.bottomSpotOut][self.midX]], (self.midX*32, self.bottomSpotOut*32))
tt = self.topTile()
if tt == 1:
self.level.frontData[self.topSpotOut][self.midX] = 0
self.screen.blit(self.level.backGfx[self.level.backData[self.topSpotOut][self.midX]], (self.midX*32, self.topSpotOut*32))
if self.horizontalMovement == self.movingRight:
if self.rightTile() == 1:
self.level.frontData[self.midY][self.rightSpotOut] = 0
self.screen.blit(self.level.backGfx[self.level.backData[self.midY][self.rightSpotOut]], (self.rightSpotOut*32, self.midY*32))
self.turnLeft()
elif self.wallRight():
self.turnLeft()
elif self.horizontalMovement == self.movingLeft:
if self.leftTile() == 1:
self.level.frontData[self.midY][self.leftSpotOut] = 0
self.screen.blit(self.level.backGfx[self.level.backData[self.midY][self.leftSpotOut]], (self.leftSpotOut*32, self.midY*32))
self.turnRight()
elif self.wallLeft():
self.turnRight()
elif self.verticalMovement == self.falling:
self.checkFalling() | unknown | codeparrot/codeparrot-clean | ||
"""
Array methods which are called by both the C-code for the method
and the Python code for the NumPy-namespace function
"""
from __future__ import division, absolute_import, print_function
import warnings
from numpy.core import multiarray as mu
from numpy.core import umath as um
from numpy.core.numeric import asanyarray
from numpy.core import numerictypes as nt
# save those O(100) nanoseconds!
umr_maximum = um.maximum.reduce
umr_minimum = um.minimum.reduce
umr_sum = um.add.reduce
umr_prod = um.multiply.reduce
umr_any = um.logical_or.reduce
umr_all = um.logical_and.reduce
# avoid keyword arguments to speed up parsing, saves about 15%-20% for very
# small reductions
def _amax(a, axis=None, out=None, keepdims=False):
return umr_maximum(a, axis, None, out, keepdims)
def _amin(a, axis=None, out=None, keepdims=False):
return umr_minimum(a, axis, None, out, keepdims)
def _sum(a, axis=None, dtype=None, out=None, keepdims=False):
return umr_sum(a, axis, dtype, out, keepdims)
def _prod(a, axis=None, dtype=None, out=None, keepdims=False):
return umr_prod(a, axis, dtype, out, keepdims)
def _any(a, axis=None, dtype=None, out=None, keepdims=False):
return umr_any(a, axis, dtype, out, keepdims)
def _all(a, axis=None, dtype=None, out=None, keepdims=False):
return umr_all(a, axis, dtype, out, keepdims)
def _count_reduce_items(arr, axis):
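# Number of array elements that a reduction over `axis` collapses (product of the reduced dims).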
if axis is None:
axis = tuple(range(arr.ndim))
if not isinstance(axis, tuple):
axis = (axis,)
items = 1
for ax in axis:
items *= arr.shape[ax]
return items
def _mean(a, axis=None, dtype=None, out=None, keepdims=False):
arr = asanyarray(a)
rcount = _count_reduce_items(arr, axis)
# Make this warning show up first
if rcount == 0:
warnings.warn("Mean of empty slice.", RuntimeWarning)
# Cast bool, unsigned int, and int to float64 by default
if dtype is None and issubclass(arr.dtype.type, (nt.integer, nt.bool_)):
dtype = mu.dtype('f8')
ret = umr_sum(arr, axis, dtype, out, keepdims)
if isinstance(ret, mu.ndarray):
ret = um.true_divide(
ret, rcount, out=ret, casting='unsafe', subok=False)
elif hasattr(ret, 'dtype'):
ret = ret.dtype.type(ret / rcount)
else:
ret = ret / rcount
return ret
def _var(a, axis=None, dtype=None, out=None, ddof=0, keepdims=False):
arr = asanyarray(a)
rcount = _count_reduce_items(arr, axis)
# Make this warning show up on top.
if ddof >= rcount:
warnings.warn("Degrees of freedom <= 0 for slice", RuntimeWarning)
# Cast bool, unsigned int, and int to float64 by default
if dtype is None and issubclass(arr.dtype.type, (nt.integer, nt.bool_)):
dtype = mu.dtype('f8')
# Compute the mean.
# Note that if dtype is not of inexact type then arraymean will
# not be either.
arrmean = umr_sum(arr, axis, dtype, keepdims=True)
if isinstance(arrmean, mu.ndarray):
arrmean = um.true_divide(
arrmean, rcount, out=arrmean, casting='unsafe', subok=False)
else:
arrmean = arrmean.dtype.type(arrmean / rcount)
# Compute sum of squared deviations from mean
# Note that x may not be inexact and that we need it to be an array,
# not a scalar.
x = asanyarray(arr - arrmean)
if issubclass(arr.dtype.type, nt.complexfloating):
x = um.multiply(x, um.conjugate(x), out=x).real
else:
x = um.multiply(x, x, out=x)
ret = umr_sum(x, axis, dtype, out, keepdims)
# Compute degrees of freedom and make sure it is not negative.
rcount = max([rcount - ddof, 0])
# divide by degrees of freedom
if isinstance(ret, mu.ndarray):
ret = um.true_divide(
ret, rcount, out=ret, casting='unsafe', subok=False)
elif hasattr(ret, 'dtype'):
ret = ret.dtype.type(ret / rcount)
else:
ret = ret / rcount
return ret
def _std(a, axis=None, dtype=None, out=None, ddof=0, keepdims=False):
ret = _var(a, axis=axis, dtype=dtype, out=out, ddof=ddof,
keepdims=keepdims)
if isinstance(ret, mu.ndarray):
ret = um.sqrt(ret, out=ret)
elif hasattr(ret, 'dtype'):
ret = ret.dtype.type(um.sqrt(ret))
else:
ret = um.sqrt(ret)
return ret | unknown | codeparrot/codeparrot-clean | ||
{
"PUNSUBSCRIBE": {
"summary": "Stops listening to messages published to channels that match one or more patterns.",
"complexity": "O(N) where N is the number of patterns to unsubscribe.",
"group": "pubsub",
"since": "2.0.0",
"arity": -1,
"function": "punsubscribeCommand",
"command_flags": [
"PUBSUB",
"NOSCRIPT",
"LOADING",
"STALE",
"SENTINEL"
],
"arguments": [
{
"name": "pattern",
"type": "pattern",
"optional": true,
"multiple": true
}
]
}
} | json | github | https://github.com/redis/redis | src/commands/punsubscribe.json |
import numpy as np
import numpy.random as npr
import kayak
from . import *
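# the star import supplies the shared test helpers used below (NUM_TRIALS, MAX_GRAD_DIFF, close_float)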
def test_elemmult_values_1():
npr.seed(1)
for ii in xrange(NUM_TRIALS):
np_A = npr.randn(5,6)
np_B = npr.randn(5,6)
A = kayak.Parameter(np_A)
B = kayak.Parameter(np_B)
C = kayak.ElemMult(A, B)
assert C.shape == np_A.shape
assert np.all( close_float(C.value, np_A*np_B))
def test_elemmult_values_2():
npr.seed(2)
for ii in xrange(NUM_TRIALS):
np_A = npr.randn(5,6)
np_B = npr.randn(5,6)
np_C = npr.randn(5,6)
A = kayak.Parameter(np_A)
B = kayak.Parameter(np_B)
C = kayak.Parameter(np_C)
D = kayak.ElemMult(A, B, C)
assert D.shape == np_A.shape
assert np.all( close_float(D.value, np_A*np_B*np_C))
def test_elemmult_values_3():
npr.seed(7)
for ii in xrange(NUM_TRIALS):
np_A = npr.randn(5,6)
np_B = npr.randn(5,6)
A = kayak.Parameter(np_A)
B = kayak.Parameter(np_B)
D = kayak.ElemMult(A, B, A)
assert D.shape == (5,6)
assert np.all( close_float(D.value, np_A**2 * np_B))
def test_elemmult_values_4():
npr.seed(1)
for ii in xrange(NUM_TRIALS):
np_A = npr.randn(5,1)
np_B = npr.randn(5,6)
A = kayak.Parameter(np_A)
B = kayak.Parameter(np_B)
C = kayak.ElemMult(A, B)
assert C.shape == (5,6)
assert np.all( close_float(C.value, np_A*np_B))
def test_elemmult_values_5():
npr.seed(2)
for ii in xrange(NUM_TRIALS):
np_A = npr.randn(5,1)
np_B = npr.randn(1,6)
np_C = npr.randn(1,1)
A = kayak.Parameter(np_A)
B = kayak.Parameter(np_B)
C = kayak.Parameter(np_C)
D = kayak.ElemMult(A, B, C)
assert D.shape == (5,6)
assert np.all( close_float(D.value, np_A*np_B*np_C))
def test_elemmult_values_6():
npr.seed(7)
for ii in xrange(NUM_TRIALS):
np_A = npr.randn(1,1)
np_B = npr.randn(5,6)
A = kayak.Parameter(np_A)
B = kayak.Parameter(np_B)
D = kayak.ElemMult(A, B, A)
assert D.shape == (5,6)
assert np.all( close_float(D.value, np_A**2 * np_B))
def test_elemmult_grad_1():
npr.seed(8)
for ii in xrange(NUM_TRIALS):
np_A = npr.randn(5,6)
np_B = npr.randn(5,6)
A = kayak.Parameter(np_A)
B = kayak.Parameter(np_B)
C = kayak.ElemMult(A, B)
D = kayak.MatSum(C)
D.value
assert kayak.util.checkgrad(A, D) < MAX_GRAD_DIFF
assert kayak.util.checkgrad(B, D) < MAX_GRAD_DIFF
def test_elemmult_grad_2():
npr.seed(9)
for ii in xrange(NUM_TRIALS):
np_A = npr.randn(5,6)
np_B = npr.randn(5,6)
np_C = npr.randn(5,6)
A = kayak.Parameter(np_A)
B = kayak.Parameter(np_B)
C = kayak.Parameter(np_C)
D = kayak.ElemMult(A, B, C)
E = kayak.MatSum(D)
E.value
assert E.grad(A).shape == np_A.shape
assert E.grad(B).shape == np_B.shape
assert E.grad(C).shape == np_C.shape
assert kayak.util.checkgrad(A, E) < MAX_GRAD_DIFF
assert kayak.util.checkgrad(B, E) < MAX_GRAD_DIFF
assert kayak.util.checkgrad(C, E) < MAX_GRAD_DIFF
def test_elemmult_grad_3():
npr.seed(14)
for ii in xrange(NUM_TRIALS):
np_A = npr.randn(5,6)
np_B = npr.randn(5,6)
A = kayak.Parameter(np_A)
B = kayak.Parameter(np_B)
D = kayak.ElemMult(A, B, A)
E = kayak.MatSum(D)
E.value
assert E.grad(A).shape == np_A.shape
assert E.grad(B).shape == np_B.shape
assert kayak.util.checkgrad(A, E) < MAX_GRAD_DIFF
assert kayak.util.checkgrad(B, E) < MAX_GRAD_DIFF
def test_elemmult_grad_4():
npr.seed(15)
for ii in xrange(NUM_TRIALS):
np_A = npr.randn(5,6)
A = kayak.Parameter(np_A)
D = kayak.ElemMult(A, A)
E = kayak.MatSum(D)
E.value
assert E.grad(A).shape == np_A.shape
assert kayak.util.checkgrad(A, E) < MAX_GRAD_DIFF
def test_elemmult_grad_5():
npr.seed(8)
for ii in xrange(NUM_TRIALS):
np_A = npr.randn(5,1)
np_B = npr.randn(5,6)
A = kayak.Parameter(np_A)
B = kayak.Parameter(np_B)
C = kayak.ElemMult(A, B)
D = kayak.MatSum(C)
D.value
assert D.grad(A).shape == np_A.shape
assert D.grad(B).shape == np_B.shape
assert kayak.util.checkgrad(A, D) < MAX_GRAD_DIFF
assert kayak.util.checkgrad(B, D) < MAX_GRAD_DIFF
def test_elemmult_grad_6():
npr.seed(9)
for ii in xrange(NUM_TRIALS):
np_A = npr.randn(5,1)
np_B = npr.randn(1,6)
np_C = npr.randn(1,1)
A = kayak.Parameter(np_A)
B = kayak.Parameter(np_B)
C = kayak.Parameter(np_C)
D = kayak.ElemMult(A, B, C)
E = kayak.MatSum(D)
E.value
assert E.grad(A).shape == np_A.shape
assert E.grad(B).shape == np_B.shape
assert E.grad(C).shape == np_C.shape
assert kayak.util.checkgrad(A, E) < MAX_GRAD_DIFF
assert kayak.util.checkgrad(B, E) < MAX_GRAD_DIFF
assert kayak.util.checkgrad(C, E) < MAX_GRAD_DIFF
def test_elemmult_grad_7():
npr.seed(14)
for ii in xrange(NUM_TRIALS):
np_A = npr.randn(5,6)
np_B = npr.randn(1,1)
A = kayak.Parameter(np_A)
B = kayak.Parameter(np_B)
D = kayak.ElemMult(A, B, A)
E = kayak.MatSum(D)
E.value
assert E.grad(A).shape == np_A.shape
assert E.grad(B).shape == np_B.shape
assert kayak.util.checkgrad(A, E) < MAX_GRAD_DIFF
assert kayak.util.checkgrad(B, E) < MAX_GRAD_DIFF
def test_elemmult_grad_8():
npr.seed(15)
for ii in xrange(NUM_TRIALS):
np_A = npr.randn(5,6)
A = kayak.Parameter(np_A)
D = kayak.ElemMult(A, A)
E = kayak.MatSum(D)
assert E.grad(A).shape == np_A.shape
assert kayak.util.checkgrad(A, E) < MAX_GRAD_DIFF | unknown | codeparrot/codeparrot-clean | ||
"""County level agg."""
from pyiem.util import get_dbconn
from pyiem.plot.geoplot import MapPlot
from geopandas import read_postgis
from pandas.io.sql import read_sql
def main():
"""Go Main Go."""
years = 12.0 # 2008 - 2019
pgconn = get_dbconn("idep")
postgis = get_dbconn("postgis")
# Get the initial geometries
df = read_postgis(
"""
SELECT ugc, name, geom from ugcs WHERE end_ts is null and
substr(ugc, 1, 3) = 'IAC'
""",
postgis,
index_col="ugc",
crs="EPSG:4326",
)
scenario = 0
df2 = read_sql(
"""WITH data as (
SELECT r.huc_12,
sum(avg_loss) * 4.463 / %s as detach,
sum(avg_delivery) * 4.463 / %s as delivery,
sum(avg_runoff) / 25.4 / %s as runoff
from results_by_huc12 r
, huc12 h WHERE r.huc_12 = h.huc_12 and h.states ~* 'IA'
and r.scenario = %s and h.scenario = 0 and r.valid < '2020-01-01'
and r.valid > '2008-01-01'
GROUP by r.huc_12)
SELECT ugc, avg(detach) as detach, avg(delivery) as delivery,
avg(runoff) as runoff from data d JOIN huc12 h on (d.huc_12 = h.huc_12)
WHERE h.scenario = 0 GROUP by ugc ORDER by delivery desc
""",
pgconn,
params=(years, years, years, scenario),
index_col="ugc",
)
newcols = {
"detach": "det%s" % (0,),
"delivery": "del%s" % (0,),
"runoff": "run%s" % (0,),
}
for key, val in newcols.items():
df[val] = df2[key]
df = df.sort_values("del0", ascending=False)
print(df.head(10))
mp = MapPlot(
title="2008-2019 DEP Top 10 Erosive Counties", logo="dep", caption=""
)
df2 = df.head(10)
mp.fill_ugcs(df2["del0"].to_dict())
mp.postprocess(filename="test.png")
if __name__ == "__main__":
main() | unknown | codeparrot/codeparrot-clean | ||
import itertools
import json
import logging
import math
import os
import string
import time
from datetime import datetime
from mimetypes import types_map
from urllib.parse import urljoin, quote
import bs4
import requests
from django.conf import settings
from django.contrib.gis.gdal.error import GDALException
from django.contrib.gis.geos import GEOSException, fromstr
from django.urls import resolve
from django.http import HttpResponse
from django.template.exceptions import TemplateDoesNotExist
from django.template.loader import get_template
from django.utils import timezone
from django.utils.translation import get_language
from .settings import app_settings, API_SRID
logger = logging.getLogger(__name__)
def bbox_split(bbox, by_x=2, by_y=2, cycle=False):
"""Divide a box in rectangle, by_x parts and by_y parts"""
minx, miny, maxx, maxy = bbox
stepx = (maxx - minx) / by_x
stepy = (maxy - miny) / by_y
def gen():
"""define as inner function to decorate it with cycle"""
stepx_tmp = minx
while stepx_tmp + stepx <= maxx:
stepx_next = stepx_tmp + stepx
stepy_tmp = miny
while stepy_tmp + stepy <= maxy:
stepy_next = stepy_tmp + stepy
yield (stepx_tmp, stepy_tmp, stepx_next, stepy_next)
stepy_tmp = stepy_next
stepx_tmp = stepx_next
if cycle:
return itertools.cycle(gen())
else:
return gen()
def bbox_split_srid_2154(*args, **kwargs):
"""Just round"""
gen = bbox_split(*args, **kwargs)
return iter(lambda: map(round, next(gen)), None)
def api_bbox(bbox, srid=None, buffer=0.0):
""" Receives a tuple(xmin, ymin, xmax, ymax) and
returns a tuple in API projection.
:srid: bbox projection (Default: settings.SRID)
:buffer: grow the bbox in ratio of width (Default: 0.0)
"""
srid = srid or settings.SRID
wkt_box = 'POLYGON(({0} {1}, {2} {1}, {2} {3}, {0} {3}, {0} {1}))'
wkt = wkt_box.format(*bbox)
native = wkt_to_geom(wkt, srid_from=srid)
if srid != API_SRID:
native.transform(API_SRID)
if buffer > 0:
extent = native.extent
width = extent[2] - extent[0]
native = native.buffer(width * buffer)
return tuple(native.extent)
def wkt_to_geom(wkt, srid_from=None, silent=False):
if srid_from is None:
srid_from = API_SRID
try:
return fromstr(wkt, srid=srid_from)
except (GDALException, GEOSException) as e:
if not silent:
raise e
return None
def smart_urljoin(base, path):
if base[-1] != '/':
base += '/'
if path[0] == '/':
path = path[1:]
return urljoin(base, path)
def is_file_uptodate(path, date_update, delete_empty=True):
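# A file is up to date when it exists, is non-empty and was modified after date_update;
# empty files are optionally removed so they get regenerated.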
if not os.path.exists(path):
return False
if date_update is None:
return False
if os.path.getsize(path) == 0:
if delete_empty:
os.remove(path)
return False
modified = datetime.utcfromtimestamp(os.path.getmtime(path))
modified = modified.replace(tzinfo=timezone.utc)
return modified > date_update
def get_source(url, headers):
logger.info("Request to: %s" % url)
source = requests.get(url, headers=headers)
status_error = 'Request on %s failed (status=%s)' % (url, source.status_code)
assert source.status_code == 200, status_error
content_error = 'Request on %s returned empty content' % url
assert len(source.content) > 0, content_error
return source
def download_to_stream(url, stream, silent=False, headers=None):
""" Download url and writes response to stream.
"""
source = None
try:
try:
source = get_source(url, headers)
except requests.exceptions.ConnectionError:
time.sleep(1)
source = get_source(url, headers)
except (AssertionError, requests.exceptions.RequestException) as e:
logger.exception(e)
logger.info('Headers sent: %s' % headers)
if hasattr(source, 'text'):
logger.info('Response: %s' % source.text[:150])
if not silent:
raise
if source is None:
return source
try:
stream.write(source.content)
stream.flush()
except IOError as e:
logger.exception(e)
if not silent:
raise
if isinstance(stream, HttpResponse):
stream.status_code = source.status_code
# Copy headers
for header, value in source.headers.items():
stream[header] = value
return source
def convertit_url(url, from_type=None, to_type=None, proxy=False):
if not to_type:
to_type = 'application/pdf'
mimetype = to_type
if '/' not in mimetype:
extension = '.' + mimetype if not mimetype.startswith('.') else mimetype
mimetype = types_map[extension]
fromparam = ("&from=%s" % quote(from_type)) if from_type is not None else ''
params = 'url={url}{fromparam}&to={to}'.format(url=quote(url),
fromparam=fromparam,
to=quote(mimetype))
url = '{server}/?{params}'.format(server=app_settings['CONVERSION_SERVER'],
params=params)
return url
def convertit_download(url, destination, from_type=None, to_type='application/pdf', headers=None):
# Mock for tests
if getattr(settings, 'TEST', False):
with open(destination, 'w') as out_file:
out_file.write("Mock\n")
return
url = convertit_url(url, from_type, to_type)
fd = open(destination, 'wb') if isinstance(destination, str) else destination
download_to_stream(url, fd, headers=headers)
def capture_url(url, width=None, height=None, selector=None, waitfor=None):
"""Return URL to request a capture from Screamshotter
"""
server = app_settings['CAPTURE_SERVER']
width = ('&width=%s' % width) if width else ''
height = ('&height=%s' % height) if height else ''
selector = ('&selector=%s' % quote(selector)) if selector else ''
waitfor = ('&waitfor=%s' % quote(waitfor)) if waitfor else ''
params = '{width}{height}{selector}{waitfor}'.format(width=width,
height=height,
selector=selector,
waitfor=waitfor)
capture_url = '{server}/?url={url}{params}'.format(server=server,
url=quote(url),
params=params)
return capture_url
def capture_image(url, stream, **kwargs):
"""Capture url to stream.
"""
url = capture_url(url, **kwargs)
download_to_stream(url, stream)
def capture_map_image(url, destination, size=None, aspect=1.0, waitfor='.leaflet-tile-loaded', printcontext=None):
"""Prepare aspect of the detail page
It relies on JS code in MapEntity.Context
"""
# Control aspect of captured images
if size is None:
size = app_settings['MAP_CAPTURE_SIZE']
if aspect < 1.0:
mapsize = dict(width=size * aspect, height=size)
else:
mapsize = dict(width=size, height=size / aspect)
_printcontext = dict(mapsize=mapsize)
_printcontext['print'] = True
if printcontext:
_printcontext.update(printcontext)
serialized = json.dumps(_printcontext)
# Run head-less capture (takes time)
url += '?lang={}&context={}'.format(get_language(), quote(serialized))
with open(destination, 'wb') as fd:
capture_image(url, fd,
selector='.map-panel',
waitfor=waitfor)
def extract_attributes_html(url, request):
"""
The tidy XHTML version of an object's attributes.
Since we have to insert them in document exports, we extract the
``details-panel`` of the detail page, using BeautifulSoup.
With this, we save a lot of effort, since we do not have to build specific
Appy.pod templates for each model.
"""
func, args, kwargs = resolve(url)
response = func(request, *args, **kwargs)
response.render()
soup = bs4.BeautifulSoup(response.content, 'lxml')
details = soup.find(id="properties")
if details is None:
        raise ValueError('Content of detail page is invalid')
# Remove "Add" buttons
for p in details('p'):
if 'autohide' in p.get('class', ''):
p.extract()
# Remove Javascript
for s in details('script'):
s.extract()
# Remove images (Appy.pod fails with them)
for i in details('img'):
i.replaceWith(i.get('title', ''))
# Remove links (Appy.pod sometimes shows empty strings)
for a in details('a'):
a.replaceWith(a.text)
# Prettify (ODT compat.) and convert unicode to XML entities
cooked = details.prettify('ascii', formatter='html').decode()
return cooked
def user_has_perm(user, perm):
# First check if the user has the permission (even anon user)
if user.has_perm(perm):
return True
if user.is_anonymous:
return perm in app_settings['ANONYMOUS_VIEWS_PERMS']
return False
def alphabet_enumeration(length):
"""
    Return list of letters: A, B, ... Z, AA, AB, ...
See mapentity/leaflet.enumeration.js
"""
if length == 0:
return []
if length == 1:
return ["A"]
width = int(math.ceil(math.log(length, 26)))
enums = []
alphabet = string.ascii_uppercase
for i in range(length):
enum = ""
for j in range(width):
enum = alphabet[i % 26] + enum
i = i // 26
enums.append(enum)
return enums
def suffix_for(template_name_suffix, template_type, extension):
return "%s%s.%s" % (template_name_suffix, template_type, extension)
def name_for(app, modelname, suffix):
return "%s/%s%s" % (app, modelname, suffix)
def smart_get_template(model, suffix):
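    # Try templates from most to least specific: the model's own app first, then
    # "mapentity/override", and finally the generic "mapentity/mapentity" fallback.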
for appname, modelname in [(model._meta.app_label, model._meta.object_name.lower()),
("mapentity", "override"),
("mapentity", "mapentity")]:
try:
template_name = name_for(appname, modelname, suffix)
get_template(template_name) # Will raise if not exist
return template_name
except TemplateDoesNotExist:
pass
return None | unknown | codeparrot/codeparrot-clean | ||
/*
* Copyright (C) 2007 The Guava Authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.google.common.collect;
import static java.util.Collections.singletonMap;
import com.google.common.annotations.GwtCompatible;
import java.util.Map.Entry;
import junit.framework.TestCase;
import org.jspecify.annotations.NullMarked;
import org.jspecify.annotations.Nullable;
/**
* Tests for {@code AbstractMapEntry}.
*
* @author Mike Bostock
*/
@GwtCompatible
@NullMarked
public class AbstractMapEntryTest extends TestCase {
private static final @Nullable String NK = null;
private static final @Nullable Integer NV = null;
private static <K extends @Nullable Object, V extends @Nullable Object> Entry<K, V> entry(
K key, V value) {
return new AbstractMapEntry<K, V>() {
@Override
public K getKey() {
return key;
}
@Override
public V getValue() {
return value;
}
};
}
private static <K extends @Nullable Object, V extends @Nullable Object> Entry<K, V> control(
K key, V value) {
return singletonMap(key, value).entrySet().iterator().next();
}
public void testToString() {
assertEquals("foo=1", entry("foo", 1).toString());
}
public void testToStringNull() {
assertEquals("null=1", entry(NK, 1).toString());
assertEquals("foo=null", entry("foo", NV).toString());
assertEquals("null=null", entry(NK, NV).toString());
}
public void testEquals() {
Entry<String, Integer> foo1 = entry("foo", 1);
// Explicitly call `equals`; `assertEquals` might return fast
assertTrue(foo1.equals(foo1));
assertEquals(control("foo", 1), foo1);
assertEquals(control("bar", 2), entry("bar", 2));
assertFalse(control("foo", 1).equals(entry("foo", 2)));
assertFalse(foo1.equals(control("bar", 1)));
assertFalse(foo1.equals(new Object()));
assertFalse(foo1.equals(null));
}
public void testEqualsNull() {
assertEquals(control(NK, 1), entry(NK, 1));
assertEquals(control("bar", NV), entry("bar", NV));
assertFalse(control(NK, 1).equals(entry(NK, 2)));
assertFalse(entry(NK, 1).equals(control("bar", 1)));
assertFalse(entry(NK, 1).equals(new Object()));
assertFalse(entry(NK, 1).equals(null));
}
public void testHashCode() {
assertEquals(control("foo", 1).hashCode(), entry("foo", 1).hashCode());
assertEquals(control("bar", 2).hashCode(), entry("bar", 2).hashCode());
}
public void testHashCodeNull() {
assertEquals(control(NK, 1).hashCode(), entry(NK, 1).hashCode());
assertEquals(control("bar", NV).hashCode(), entry("bar", NV).hashCode());
assertEquals(control(NK, NV).hashCode(), entry(NK, NV).hashCode());
}
} | java | github | https://github.com/google/guava | android/guava-tests/test/com/google/common/collect/AbstractMapEntryTest.java |
# -*- coding: utf-8 -*-
#
#
# Author: Guewen Baconnier, Yannick Vaucher
# Copyright 2013-2015 Camptocamp SA
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#
from openerp import models, api, fields
from openerp.osv import orm
class SaleOrder(models.Model):
_inherit = 'sale.order'
def _prepare_order_line_procurement(self, cr, uid, order, line,
group_id=False, context=None):
values = super(SaleOrder, self)._prepare_order_line_procurement(
cr, uid, order, line, group_id=group_id, context=context)
if line.warehouse_id:
values['warehouse_id'] = line.warehouse_id.id
return values
@api.model
def _prepare_procurement_group_by_line(self, line):
vals = super(SaleOrder, self)._prepare_procurement_group_by_line(line)
# for compatibility with sale_quotation_sourcing
if line._get_procurement_group_key()[0] == 8:
if line.warehouse_id:
vals['name'] += '/' + line.warehouse_id.name
return vals
SO_STATES = {
'cancel': [('readonly', True)],
'progress': [('readonly', True)],
'manual': [('readonly', True)],
'shipping_except': [('readonly', True)],
'invoice_except': [('readonly', True)],
'done': [('readonly', True)],
}
warehouse_id = fields.Many2one(
'stock.warehouse',
'Default Warehouse',
states=SO_STATES,
help="If no source warehouse is selected on line, "
"this warehouse is used as default. ")
class SaleOrderLine(orm.Model):
_inherit = 'sale.order.line'
warehouse_id = fields.Many2one(
'stock.warehouse',
'Source Warehouse',
help="If a source warehouse is selected, "
"it will be used to define the route. "
"Otherwise, it will get the warehouse of "
"the sale order")
@api.multi
def _get_procurement_group_key(self):
""" Return a key with priority to be used to regroup lines in multiple
procurement groups
"""
priority = 8
key = super(SaleOrderLine, self)._get_procurement_group_key()
# Check priority
if key[0] >= priority:
return key
return (priority, self.warehouse_id.id) | unknown | codeparrot/codeparrot-clean | ||
<!DOCTYPE html>
<html lang="en">
<head>
<title>AuthenticationInterceptor Class Reference</title>
<link rel="stylesheet" type="text/css" href="../css/jazzy.css" />
<link rel="stylesheet" type="text/css" href="../css/highlight.css" />
<meta charset="utf-8">
<script src="../js/jquery.min.js" defer></script>
<script src="../js/jazzy.js" defer></script>
<script src="../js/lunr.min.js" defer></script>
<script src="../js/typeahead.jquery.js" defer></script>
<script src="../js/jazzy.search.js" defer></script>
</head>
<body>
<a name="//apple_ref/swift/Class/AuthenticationInterceptor" class="dashAnchor"></a>
<a title="AuthenticationInterceptor Class Reference"></a>
<header class="header">
<p class="header-col header-col--primary">
<a class="header-link" href="../index.html">
Alamofire 5.11.0 Docs
</a>
(96% documented)
</p>
<div class="header-col--secondary">
<form role="search" action="../search.json">
<input type="text" placeholder="Search documentation" data-typeahead>
</form>
</div>
<p class="header-col header-col--secondary">
<a class="header-link" href="https://github.com/Alamofire/Alamofire">
<img class="header-icon" src="../img/gh.png" alt="GitHub"/>
View on GitHub
</a>
</p>
<p class="header-col header-col--secondary">
<a class="header-link" href="dash-feed://https%3A%2F%2Falamofire.github.io%2FAlamofire%2Fdocsets%2FAlamofire.xml">
<img class="header-icon" src="../img/dash.png" alt="Dash"/>
Install in Dash
</a>
</p>
</header>
<p class="breadcrumbs">
<a class="breadcrumb" href="../index.html">Alamofire</a>
<img class="carat" src="../img/carat.png" alt=""/>
<a class="breadcrumb" href="../Classes.html">Classes</a>
<img class="carat" src="../img/carat.png" alt=""/>
AuthenticationInterceptor Class Reference
</p>
<div class="content-wrapper">
<nav class="navigation">
<ul class="nav-groups">
<li class="nav-group-name">
<a class="nav-group-name-link" href="../Classes.html">Classes</a>
<ul class="nav-group-tasks">
<li class="nav-group-task">
<a class="nav-group-task-link" href="../Classes/Adapter.html">Adapter</a>
</li>
<li class="nav-group-task">
<a class="nav-group-task-link" href="../Classes/AlamofireNotifications.html">AlamofireNotifications</a>
</li>
<li class="nav-group-task">
<a class="nav-group-task-link" href="../Classes/AuthenticationInterceptor.html">AuthenticationInterceptor</a>
</li>
<li class="nav-group-task">
<a class="nav-group-task-link" href="../Classes/AuthenticationInterceptor/RefreshWindow.html">– RefreshWindow</a>
</li>
<li class="nav-group-task">
<a class="nav-group-task-link" href="../Classes/ClosureEventMonitor.html">ClosureEventMonitor</a>
</li>
<li class="nav-group-task">
<a class="nav-group-task-link" href="../Classes/CompositeEventMonitor.html">CompositeEventMonitor</a>
</li>
<li class="nav-group-task">
<a class="nav-group-task-link" href="../Classes/CompositeTrustEvaluator.html">CompositeTrustEvaluator</a>
</li>
<li class="nav-group-task">
<a class="nav-group-task-link" href="../Classes/ConnectionLostRetryPolicy.html">ConnectionLostRetryPolicy</a>
</li>
<li class="nav-group-task">
<a class="nav-group-task-link" href="../Classes/DataRequest.html">DataRequest</a>
</li>
<li class="nav-group-task">
<a class="nav-group-task-link" href="../Classes/DataResponseSerializer.html">DataResponseSerializer</a>
</li>
<li class="nav-group-task">
<a class="nav-group-task-link" href="../Classes/DataStreamRequest.html">DataStreamRequest</a>
</li>
<li class="nav-group-task">
<a class="nav-group-task-link" href="../Classes/DataStreamRequest/Stream.html">– Stream</a>
</li>
<li class="nav-group-task">
<a class="nav-group-task-link" href="../Classes/DataStreamRequest/Event.html">– Event</a>
</li>
<li class="nav-group-task">
<a class="nav-group-task-link" href="../Classes/DataStreamRequest/Completion.html">– Completion</a>
</li>
<li class="nav-group-task">
<a class="nav-group-task-link" href="../Classes/DataStreamRequest/CancellationToken.html">– CancellationToken</a>
</li>
<li class="nav-group-task">
<a class="nav-group-task-link" href="../Classes/DecodableResponseSerializer.html">DecodableResponseSerializer</a>
</li>
<li class="nav-group-task">
<a class="nav-group-task-link" href="../Classes/DefaultTrustEvaluator.html">DefaultTrustEvaluator</a>
</li>
<li class="nav-group-task">
<a class="nav-group-task-link" href="../Classes/DisabledTrustEvaluator.html">DisabledTrustEvaluator</a>
</li>
<li class="nav-group-task">
<a class="nav-group-task-link" href="../Classes/DownloadRequest.html">DownloadRequest</a>
</li>
<li class="nav-group-task">
<a class="nav-group-task-link" href="../Classes/DownloadRequest/Options.html">– Options</a>
</li>
<li class="nav-group-task">
<a class="nav-group-task-link" href="../Classes/DownloadRequest/Downloadable.html">– Downloadable</a>
</li>
<li class="nav-group-task">
<a class="nav-group-task-link" href="../Classes/Interceptor.html">Interceptor</a>
</li>
<li class="nav-group-task">
<a class="nav-group-task-link" href="../Classes/JSONParameterEncoder.html">JSONParameterEncoder</a>
</li>
<li class="nav-group-task">
<a class="nav-group-task-link" href="../Classes/JSONResponseSerializer.html">JSONResponseSerializer</a>
</li>
<li class="nav-group-task">
<a class="nav-group-task-link" href="../Classes/MultipartFormData.html">MultipartFormData</a>
</li>
<li class="nav-group-task">
<a class="nav-group-task-link" href="../Classes/NetworkReachabilityManager.html">NetworkReachabilityManager</a>
</li>
<li class="nav-group-task">
<a class="nav-group-task-link" href="../Classes/NetworkReachabilityManager/NetworkReachabilityStatus.html">– NetworkReachabilityStatus</a>
</li>
<li class="nav-group-task">
<a class="nav-group-task-link" href="../Classes/OfflineRetrier.html">OfflineRetrier</a>
</li>
<li class="nav-group-task">
<a class="nav-group-task-link" href="../Classes/PinnedCertificatesTrustEvaluator.html">PinnedCertificatesTrustEvaluator</a>
</li>
<li class="nav-group-task">
<a class="nav-group-task-link" href="../Classes/PublicKeysTrustEvaluator.html">PublicKeysTrustEvaluator</a>
</li>
<li class="nav-group-task">
<a class="nav-group-task-link" href="../Classes/Request.html">Request</a>
</li>
<li class="nav-group-task">
<a class="nav-group-task-link" href="../Classes/Request/State.html">– State</a>
</li>
<li class="nav-group-task">
<a class="nav-group-task-link" href="../Classes/Request/ResponseDisposition.html">– ResponseDisposition</a>
</li>
<li class="nav-group-task">
<a class="nav-group-task-link" href="../Classes/Retrier.html">Retrier</a>
</li>
<li class="nav-group-task">
<a class="nav-group-task-link" href="../Classes/RetryPolicy.html">RetryPolicy</a>
</li>
<li class="nav-group-task">
<a class="nav-group-task-link" href="../Classes/RevocationTrustEvaluator.html">RevocationTrustEvaluator</a>
</li>
<li class="nav-group-task">
<a class="nav-group-task-link" href="../Classes/RevocationTrustEvaluator/Options.html">– Options</a>
</li>
<li class="nav-group-task">
<a class="nav-group-task-link" href="../Classes/ServerTrustManager.html">ServerTrustManager</a>
</li>
<li class="nav-group-task">
<a class="nav-group-task-link" href="../Classes/Session.html">Session</a>
</li>
<li class="nav-group-task">
<a class="nav-group-task-link" href="../Classes/Session/RequestSetup.html">– RequestSetup</a>
</li>
<li class="nav-group-task">
<a class="nav-group-task-link" href="../Classes/SessionDelegate.html">SessionDelegate</a>
</li>
<li class="nav-group-task">
<a class="nav-group-task-link" href="../Classes/StringResponseSerializer.html">StringResponseSerializer</a>
</li>
<li class="nav-group-task">
<a class="nav-group-task-link" href="../Classes/URLEncodedFormEncoder.html">URLEncodedFormEncoder</a>
</li>
<li class="nav-group-task">
<a class="nav-group-task-link" href="../Classes/URLEncodedFormEncoder/ArrayEncoding.html">– ArrayEncoding</a>
</li>
<li class="nav-group-task">
<a class="nav-group-task-link" href="../Classes/URLEncodedFormEncoder/BoolEncoding.html">– BoolEncoding</a>
</li>
<li class="nav-group-task">
<a class="nav-group-task-link" href="../Classes/URLEncodedFormEncoder/DataEncoding.html">– DataEncoding</a>
</li>
<li class="nav-group-task">
<a class="nav-group-task-link" href="../Classes/URLEncodedFormEncoder/DateEncoding.html">– DateEncoding</a>
</li>
<li class="nav-group-task">
<a class="nav-group-task-link" href="../Classes/URLEncodedFormEncoder/KeyEncoding.html">– KeyEncoding</a>
</li>
<li class="nav-group-task">
<a class="nav-group-task-link" href="../Classes/URLEncodedFormEncoder/KeyPathEncoding.html">– KeyPathEncoding</a>
</li>
<li class="nav-group-task">
<a class="nav-group-task-link" href="../Classes/URLEncodedFormEncoder/NilEncoding.html">– NilEncoding</a>
</li>
<li class="nav-group-task">
<a class="nav-group-task-link" href="../Classes/URLEncodedFormEncoder/SpaceEncoding.html">– SpaceEncoding</a>
</li>
<li class="nav-group-task">
<a class="nav-group-task-link" href="../Classes/URLEncodedFormEncoder/Error.html">– Error</a>
</li>
<li class="nav-group-task">
<a class="nav-group-task-link" href="../Classes/URLEncodedFormParameterEncoder.html">URLEncodedFormParameterEncoder</a>
</li>
<li class="nav-group-task">
<a class="nav-group-task-link" href="../Classes/URLEncodedFormParameterEncoder/Destination.html">– Destination</a>
</li>
<li class="nav-group-task">
<a class="nav-group-task-link" href="../Classes/UploadRequest.html">UploadRequest</a>
</li>
<li class="nav-group-task">
<a class="nav-group-task-link" href="../Classes/UploadRequest/Uploadable.html">– Uploadable</a>
</li>
</ul>
</li>
<li class="nav-group-name">
<a class="nav-group-name-link" href="../Global%20Variables.html">Global Variables</a>
<ul class="nav-group-tasks">
<li class="nav-group-task">
<a class="nav-group-task-link" href="../Global%20Variables.html#/s:9Alamofire2AFAA7SessionCvp">AF</a>
</li>
</ul>
</li>
<li class="nav-group-name">
<a class="nav-group-name-link" href="../Enums.html">Enumerations</a>
<ul class="nav-group-tasks">
<li class="nav-group-task">
<a class="nav-group-task-link" href="../Enums/AFError.html">AFError</a>
</li>
<li class="nav-group-task">
<a class="nav-group-task-link" href="../Enums/AFError/MultipartEncodingFailureReason.html">– MultipartEncodingFailureReason</a>
</li>
<li class="nav-group-task">
<a class="nav-group-task-link" href="../Enums/AFError/UnexpectedInputStreamLength.html">– UnexpectedInputStreamLength</a>
</li>
<li class="nav-group-task">
<a class="nav-group-task-link" href="../Enums/AFError/ParameterEncodingFailureReason.html">– ParameterEncodingFailureReason</a>
</li>
<li class="nav-group-task">
<a class="nav-group-task-link" href="../Enums/AFError/ParameterEncoderFailureReason.html">– ParameterEncoderFailureReason</a>
</li>
<li class="nav-group-task">
<a class="nav-group-task-link" href="../Enums/AFError/ResponseValidationFailureReason.html">– ResponseValidationFailureReason</a>
</li>
<li class="nav-group-task">
<a class="nav-group-task-link" href="../Enums/AFError/ResponseSerializationFailureReason.html">– ResponseSerializationFailureReason</a>
</li>
<li class="nav-group-task">
<a class="nav-group-task-link" href="../Enums/AFError/ServerTrustFailureReason.html">– ServerTrustFailureReason</a>
</li>
<li class="nav-group-task">
<a class="nav-group-task-link" href="../Enums/AFError/URLRequestValidationFailureReason.html">– URLRequestValidationFailureReason</a>
</li>
<li class="nav-group-task">
<a class="nav-group-task-link" href="../Enums/AFInfo.html">AFInfo</a>
</li>
<li class="nav-group-task">
<a class="nav-group-task-link" href="../Enums/AuthenticationError.html">AuthenticationError</a>
</li>
<li class="nav-group-task">
<a class="nav-group-task-link" href="../Enums/RetryResult.html">RetryResult</a>
</li>
</ul>
</li>
<li class="nav-group-name">
<a class="nav-group-name-link" href="../Extensions.html">Extensions</a>
<ul class="nav-group-tasks">
<li class="nav-group-task">
<a class="nav-group-task-link" href="../Extensions.html#/s:Sa">Array</a>
</li>
<li class="nav-group-task">
<a class="nav-group-task-link" href="../Extensions.html#/c:objc(cs)NSBundle">Bundle</a>
</li>
<li class="nav-group-task">
<a class="nav-group-task-link" href="../Extensions/CharacterSet.html">CharacterSet</a>
</li>
<li class="nav-group-task">
<a class="nav-group-task-link" href="../Extensions/Error.html">Error</a>
</li>
<li class="nav-group-task">
<a class="nav-group-task-link" href="../Extensions/HTTPURLResponse.html">HTTPURLResponse</a>
</li>
<li class="nav-group-task">
<a class="nav-group-task-link" href="../Extensions.html#/s:10Foundation11JSONDecoderC">JSONDecoder</a>
</li>
<li class="nav-group-task">
<a class="nav-group-task-link" href="../Extensions/Notification.html">Notification</a>
</li>
<li class="nav-group-task">
<a class="nav-group-task-link" href="../Extensions.html#/c:@T@OSStatus">OSStatus</a>
</li>
<li class="nav-group-task">
<a class="nav-group-task-link" href="../Extensions.html#/s:10Foundation19PropertyListDecoderC">PropertyListDecoder</a>
</li>
<li class="nav-group-task">
<a class="nav-group-task-link" href="../Extensions.html#/c:@T@SecCertificateRef">SecCertificate</a>
</li>
<li class="nav-group-task">
<a class="nav-group-task-link" href="../Extensions.html#/c:@T@SecPolicyRef">SecPolicy</a>
</li>
<li class="nav-group-task">
<a class="nav-group-task-link" href="../Extensions.html#/c:@T@SecTrustRef">SecTrust</a>
</li>
<li class="nav-group-task">
<a class="nav-group-task-link" href="../Extensions.html#/c:@E@SecTrustResultType">SecTrustResultType</a>
</li>
<li class="nav-group-task">
<a class="nav-group-task-link" href="../Extensions/String.html">String</a>
</li>
<li class="nav-group-task">
<a class="nav-group-task-link" href="../Extensions/URL.html">URL</a>
</li>
<li class="nav-group-task">
<a class="nav-group-task-link" href="../Extensions/URLComponents.html">URLComponents</a>
</li>
<li class="nav-group-task">
<a class="nav-group-task-link" href="../Extensions/URLRequest.html">URLRequest</a>
</li>
<li class="nav-group-task">
<a class="nav-group-task-link" href="../Extensions/URLSessionConfiguration.html">URLSessionConfiguration</a>
</li>
<li class="nav-group-task">
<a class="nav-group-task-link" href="../Extensions/%5BServerTrustEvaluating%5D.html">[ServerTrustEvaluating]</a>
</li>
</ul>
</li>
<li class="nav-group-name">
<a class="nav-group-name-link" href="../Protocols.html">Protocols</a>
<ul class="nav-group-tasks">
<li class="nav-group-task">
<a class="nav-group-task-link" href="../Protocols/AlamofireExtended.html">AlamofireExtended</a>
</li>
<li class="nav-group-task">
<a class="nav-group-task-link" href="../Protocols/AuthenticationCredential.html">AuthenticationCredential</a>
</li>
<li class="nav-group-task">
<a class="nav-group-task-link" href="../Protocols/Authenticator.html">Authenticator</a>
</li>
<li class="nav-group-task">
<a class="nav-group-task-link" href="../Protocols/CachedResponseHandler.html">CachedResponseHandler</a>
</li>
<li class="nav-group-task">
<a class="nav-group-task-link" href="../Protocols/DataDecoder.html">DataDecoder</a>
</li>
<li class="nav-group-task">
<a class="nav-group-task-link" href="../Protocols/DataPreprocessor.html">DataPreprocessor</a>
</li>
<li class="nav-group-task">
<a class="nav-group-task-link" href="../Protocols/DataResponseSerializerProtocol.html">DataResponseSerializerProtocol</a>
</li>
<li class="nav-group-task">
<a class="nav-group-task-link" href="../Protocols/DataStreamSerializer.html">DataStreamSerializer</a>
</li>
<li class="nav-group-task">
<a class="nav-group-task-link" href="../Protocols/DownloadResponseSerializerProtocol.html">DownloadResponseSerializerProtocol</a>
</li>
<li class="nav-group-task">
<a class="nav-group-task-link" href="../Protocols/EmptyResponse.html">EmptyResponse</a>
</li>
<li class="nav-group-task">
<a class="nav-group-task-link" href="../Protocols/EventMonitor.html">EventMonitor</a>
</li>
<li class="nav-group-task">
<a class="nav-group-task-link" href="../Protocols/ParameterEncoder.html">ParameterEncoder</a>
</li>
<li class="nav-group-task">
<a class="nav-group-task-link" href="../Protocols/ParameterEncoding.html">ParameterEncoding</a>
</li>
<li class="nav-group-task">
<a class="nav-group-task-link" href="../Protocols/RedirectHandler.html">RedirectHandler</a>
</li>
<li class="nav-group-task">
<a class="nav-group-task-link" href="../Protocols/RequestAdapter.html">RequestAdapter</a>
</li>
<li class="nav-group-task">
<a class="nav-group-task-link" href="../Protocols/RequestDelegate.html">RequestDelegate</a>
</li>
<li class="nav-group-task">
<a class="nav-group-task-link" href="../Protocols/RequestInterceptor.html">RequestInterceptor</a>
</li>
<li class="nav-group-task">
<a class="nav-group-task-link" href="../Protocols/RequestRetrier.html">RequestRetrier</a>
</li>
<li class="nav-group-task">
<a class="nav-group-task-link" href="../Protocols/ResponseSerializer.html">ResponseSerializer</a>
</li>
<li class="nav-group-task">
<a class="nav-group-task-link" href="../Protocols/ServerTrustEvaluating.html">ServerTrustEvaluating</a>
</li>
<li class="nav-group-task">
<a class="nav-group-task-link" href="../Protocols/URLConvertible.html">URLConvertible</a>
</li>
<li class="nav-group-task">
<a class="nav-group-task-link" href="../Protocols/URLRequestConvertible.html">URLRequestConvertible</a>
</li>
<li class="nav-group-task">
<a class="nav-group-task-link" href="../Protocols.html#/s:9Alamofire17UploadConvertibleP">UploadConvertible</a>
</li>
<li class="nav-group-task">
<a class="nav-group-task-link" href="../Protocols/UploadableConvertible.html">UploadableConvertible</a>
</li>
<li class="nav-group-task">
<a class="nav-group-task-link" href="../Protocols/WebSocketMessageSerializer.html">WebSocketMessageSerializer</a>
</li>
</ul>
</li>
<li class="nav-group-name">
<a class="nav-group-name-link" href="../Structs.html">Structures</a>
<ul class="nav-group-tasks">
<li class="nav-group-task">
<a class="nav-group-task-link" href="../Structs/AlamofireExtension.html">AlamofireExtension</a>
</li>
<li class="nav-group-task">
<a class="nav-group-task-link" href="../Structs/DataResponse.html">DataResponse</a>
</li>
<li class="nav-group-task">
<a class="nav-group-task-link" href="../Structs/DataResponsePublisher.html">DataResponsePublisher</a>
</li>
<li class="nav-group-task">
<a class="nav-group-task-link" href="../Structs/DataStreamPublisher.html">DataStreamPublisher</a>
</li>
<li class="nav-group-task">
<a class="nav-group-task-link" href="../Structs/DataStreamTask.html">DataStreamTask</a>
</li>
<li class="nav-group-task">
<a class="nav-group-task-link" href="../Structs/DataTask.html">DataTask</a>
</li>
<li class="nav-group-task">
<a class="nav-group-task-link" href="../Structs/DecodableStreamSerializer.html">DecodableStreamSerializer</a>
</li>
<li class="nav-group-task">
<a class="nav-group-task-link" href="../Structs/DecodableWebSocketMessageDecoder.html">DecodableWebSocketMessageDecoder</a>
</li>
<li class="nav-group-task">
<a class="nav-group-task-link" href="../Structs/DecodableWebSocketMessageDecoder/Error.html">– Error</a>
</li>
<li class="nav-group-task">
<a class="nav-group-task-link" href="../Structs/DeflateRequestCompressor.html">DeflateRequestCompressor</a>
</li>
<li class="nav-group-task">
<a class="nav-group-task-link" href="../Structs/DeflateRequestCompressor/DuplicateHeaderBehavior.html">– DuplicateHeaderBehavior</a>
</li>
<li class="nav-group-task">
<a class="nav-group-task-link" href="../Structs/DeflateRequestCompressor.html#/s:9Alamofire24DeflateRequestCompressorV20DuplicateHeaderErrorV">– DuplicateHeaderError</a>
</li>
<li class="nav-group-task">
<a class="nav-group-task-link" href="../Structs/DownloadResponse.html">DownloadResponse</a>
</li>
<li class="nav-group-task">
<a class="nav-group-task-link" href="../Structs/DownloadResponsePublisher.html">DownloadResponsePublisher</a>
</li>
<li class="nav-group-task">
<a class="nav-group-task-link" href="../Structs/DownloadTask.html">DownloadTask</a>
</li>
<li class="nav-group-task">
<a class="nav-group-task-link" href="../Structs/Empty.html">Empty</a>
</li>
<li class="nav-group-task">
<a class="nav-group-task-link" href="../Structs/GoogleXSSIPreprocessor.html">GoogleXSSIPreprocessor</a>
</li>
<li class="nav-group-task">
<a class="nav-group-task-link" href="../Structs/HTTPHeader.html">HTTPHeader</a>
</li>
<li class="nav-group-task">
<a class="nav-group-task-link" href="../Structs/HTTPHeaders.html">HTTPHeaders</a>
</li>
<li class="nav-group-task">
<a class="nav-group-task-link" href="../Structs/HTTPMethod.html">HTTPMethod</a>
</li>
<li class="nav-group-task">
<a class="nav-group-task-link" href="../Structs/JSONEncoding.html">JSONEncoding</a>
</li>
<li class="nav-group-task">
<a class="nav-group-task-link" href="../Structs/JSONEncoding/Error.html">– Error</a>
</li>
<li class="nav-group-task">
<a class="nav-group-task-link" href="../Structs/PassthroughPreprocessor.html">PassthroughPreprocessor</a>
</li>
<li class="nav-group-task">
<a class="nav-group-task-link" href="../Structs/PassthroughStreamSerializer.html">PassthroughStreamSerializer</a>
</li>
<li class="nav-group-task">
<a class="nav-group-task-link" href="../Structs/Redirector.html">Redirector</a>
</li>
<li class="nav-group-task">
<a class="nav-group-task-link" href="../Structs/Redirector/Behavior.html">– Behavior</a>
</li>
<li class="nav-group-task">
<a class="nav-group-task-link" href="../Structs/RequestAdapterState.html">RequestAdapterState</a>
</li>
<li class="nav-group-task">
<a class="nav-group-task-link" href="../Structs/ResponseCacher.html">ResponseCacher</a>
</li>
<li class="nav-group-task">
<a class="nav-group-task-link" href="../Structs/ResponseCacher/Behavior.html">– Behavior</a>
</li>
<li class="nav-group-task">
<a class="nav-group-task-link" href="../Structs/StreamOf.html">StreamOf</a>
</li>
<li class="nav-group-task">
<a class="nav-group-task-link" href="../Structs/StreamOf/Iterator.html">– Iterator</a>
</li>
<li class="nav-group-task">
<a class="nav-group-task-link" href="../Structs/StringStreamSerializer.html">StringStreamSerializer</a>
</li>
<li class="nav-group-task">
<a class="nav-group-task-link" href="../Structs/URLEncoding.html">URLEncoding</a>
</li>
<li class="nav-group-task">
<a class="nav-group-task-link" href="../Structs/URLEncoding/Destination.html">– Destination</a>
</li>
<li class="nav-group-task">
<a class="nav-group-task-link" href="../Structs/URLEncoding/ArrayEncoding.html">– ArrayEncoding</a>
</li>
<li class="nav-group-task">
<a class="nav-group-task-link" href="../Structs/URLEncoding/BoolEncoding.html">– BoolEncoding</a>
</li>
<li class="nav-group-task">
<a class="nav-group-task-link" href="../Structs/URLResponseSerializer.html">URLResponseSerializer</a>
</li>
</ul>
</li>
<li class="nav-group-name">
<a class="nav-group-name-link" href="../Typealiases.html">Type Aliases</a>
<ul class="nav-group-tasks">
<li class="nav-group-task">
<a class="nav-group-task-link" href="../Typealiases.html#/s:9Alamofire14AFDataResponsea">AFDataResponse</a>
</li>
<li class="nav-group-task">
<a class="nav-group-task-link" href="../Typealiases.html#/s:9Alamofire18AFDownloadResponsea">AFDownloadResponse</a>
</li>
<li class="nav-group-task">
<a class="nav-group-task-link" href="../Typealiases.html#/s:9Alamofire8AFResulta">AFResult</a>
</li>
<li class="nav-group-task">
<a class="nav-group-task-link" href="../Typealiases.html#/s:9Alamofire12AdaptHandlera">AdaptHandler</a>
</li>
<li class="nav-group-task">
<a class="nav-group-task-link" href="../Typealiases.html#/s:9Alamofire17DisabledEvaluatora">DisabledEvaluator</a>
</li>
<li class="nav-group-task">
<a class="nav-group-task-link" href="../Typealiases.html#/s:9Alamofire10Parametersa">Parameters</a>
</li>
<li class="nav-group-task">
<a class="nav-group-task-link" href="../Typealiases.html#/s:9Alamofire12RetryHandlera">RetryHandler</a>
</li>
</ul>
</li>
</ul>
</nav>
<article class="main-content">
<section class="section">
<div class="section-content top-matter">
<h1>AuthenticationInterceptor</h1>
<div class="declaration">
<div class="language">
<pre class="highlight swift"><code><span class="kd">public</span> <span class="kd">final</span> <span class="kd">class</span> <span class="kt">AuthenticationInterceptor</span><span class="o"><</span><span class="kt">AuthenticatorType</span><span class="o">></span> <span class="p">:</span> <span class="kt"><a href="../Protocols/RequestInterceptor.html">RequestInterceptor</a></span><span class="p">,</span> <span class="kt">Sendable</span> <span class="k">where</span> <span class="kt">AuthenticatorType</span> <span class="p">:</span> <span class="kt"><a href="../Protocols/Authenticator.html">Authenticator</a></span></code></pre>
</div>
</div>
<p>The <code>AuthenticationInterceptor</code> class manages the queuing and threading complexity of authenticating requests.
It relies on an <code><a href="../Protocols/Authenticator.html">Authenticator</a></code> type to handle the actual <code>URLRequest</code> authentication and <code>Credential</code> refresh.</p>
</div>
</section>
<section class="section">
<div class="section-content">
<div class="task-group">
<div class="task-name-container">
<a name="/Typealiases"></a>
<a name="//apple_ref/swift/Section/Typealiases" class="dashAnchor"></a>
<div class="section-name-container">
<a class="section-name-link" href="#/Typealiases"></a>
<h3 class="section-name"><span>Typealiases</span>
</h3>
</div>
</div>
<ul class="item-container">
<li class="item">
<div>
<code>
<a name="/s:9Alamofire25AuthenticationInterceptorC10Credentiala"></a>
<a name="//apple_ref/swift/Alias/Credential" class="dashAnchor"></a>
<a class="token" href="#/s:9Alamofire25AuthenticationInterceptorC10Credentiala">Credential</a>
</code>
</div>
<div class="height-container">
<div class="pointer-container"></div>
<section class="section">
<div class="pointer"></div>
<div class="abstract">
<p>Type of credential used to authenticate requests.</p>
</div>
<div class="declaration">
<h4>Declaration</h4>
<div class="language">
<p class="aside-title">Swift</p>
<pre class="highlight swift"><code><span class="kd">public</span> <span class="kd">typealias</span> <span class="kt">Credential</span> <span class="o">=</span> <span class="kt">AuthenticatorType</span><span class="o">.</span><span class="kt">Credential</span></code></pre>
</div>
</div>
</section>
</div>
</li>
</ul>
</div>
<div class="task-group">
<div class="task-name-container">
<a name="/Helper%20Types"></a>
<a name="//apple_ref/swift/Section/Helper Types" class="dashAnchor"></a>
<div class="section-name-container">
<a class="section-name-link" href="#/Helper%20Types"></a>
<h3 class="section-name"><span>Helper Types</span>
</h3>
</div>
</div>
<ul class="item-container">
<li class="item">
<div>
<code>
<a name="/s:9Alamofire25AuthenticationInterceptorC13RefreshWindowV"></a>
<a name="//apple_ref/swift/Struct/RefreshWindow" class="dashAnchor"></a>
<a class="token" href="#/s:9Alamofire25AuthenticationInterceptorC13RefreshWindowV">RefreshWindow</a>
</code>
</div>
<div class="height-container">
<div class="pointer-container"></div>
<section class="section">
<div class="pointer"></div>
<div class="abstract">
<p>Type that defines a time window used to identify excessive refresh calls. When enabled, prior to executing a
refresh, the <code><a href="../Classes/AuthenticationInterceptor.html">AuthenticationInterceptor</a></code> compares the timestamp history of previous refresh calls against the
<code>RefreshWindow</code>. If more refreshes have occurred within the refresh window than allowed, the refresh is
cancelled and an <code>AuthorizationError.excessiveRefresh</code> error is thrown.</p>
<a href="../Classes/AuthenticationInterceptor/RefreshWindow.html" class="slightly-smaller">See more</a>
</div>
<div class="declaration">
<h4>Declaration</h4>
<div class="language">
<p class="aside-title">Swift</p>
<pre class="highlight swift"><code><span class="kd">public</span> <span class="kd">struct</span> <span class="kt">RefreshWindow</span></code></pre>
</div>
</div>
</section>
</div>
</li>
</ul>
</div>
<div class="task-group">
<div class="task-name-container">
<a name="/Properties"></a>
<a name="//apple_ref/swift/Section/Properties" class="dashAnchor"></a>
<div class="section-name-container">
<a class="section-name-link" href="#/Properties"></a>
<h3 class="section-name"><span>Properties</span>
</h3>
</div>
</div>
<ul class="item-container">
<li class="item">
<div>
<code>
<a name="/s:9Alamofire25AuthenticationInterceptorC10credential10CredentialQzSgvp"></a>
<a name="//apple_ref/swift/Property/credential" class="dashAnchor"></a>
<a class="token" href="#/s:9Alamofire25AuthenticationInterceptorC10credential10CredentialQzSgvp">credential</a>
</code>
</div>
<div class="height-container">
<div class="pointer-container"></div>
<section class="section">
<div class="pointer"></div>
<div class="abstract">
<p>The <code><a href="../Classes/AuthenticationInterceptor.html#/s:9Alamofire25AuthenticationInterceptorC10Credentiala">Credential</a></code> used to authenticate requests.</p>
</div>
<div class="declaration">
<h4>Declaration</h4>
<div class="language">
<p class="aside-title">Swift</p>
<pre class="highlight swift"><code><span class="kd">public</span> <span class="k">var</span> <span class="nv">credential</span><span class="p">:</span> <span class="kt"><a href="../Classes/AuthenticationInterceptor.html#/s:9Alamofire25AuthenticationInterceptorC10Credentiala">Credential</a></span><span class="p">?</span> <span class="p">{</span> <span class="k">get</span> <span class="k">set</span> <span class="p">}</span></code></pre>
</div>
</div>
</section>
</div>
</li>
</ul>
</div>
<div class="task-group">
<div class="task-name-container">
<a name="/Initialization"></a>
<a name="//apple_ref/swift/Section/Initialization" class="dashAnchor"></a>
<div class="section-name-container">
<a class="section-name-link" href="#/Initialization"></a>
<h3 class="section-name"><span>Initialization</span>
</h3>
</div>
</div>
<ul class="item-container">
<li class="item">
<div>
<code>
<a name="/s:9Alamofire25AuthenticationInterceptorC13authenticator10credential13refreshWindowACyxGx_10CredentialQzSgAC07RefreshG0Vyx_GSgtcfc"></a>
<a name="//apple_ref/swift/Method/init(authenticator:credential:refreshWindow:)" class="dashAnchor"></a>
<a class="token" href="#/s:9Alamofire25AuthenticationInterceptorC13authenticator10credential13refreshWindowACyxGx_10CredentialQzSgAC07RefreshG0Vyx_GSgtcfc">init(authenticator:<wbr>credential:<wbr>refreshWindow:<wbr>)</a>
</code>
</div>
<div class="height-container">
<div class="pointer-container"></div>
<section class="section">
<div class="pointer"></div>
<div class="abstract">
<p>Creates an <code>AuthenticationInterceptor</code> instance from the specified parameters.</p>
<p>A <code>nil</code> <code><a href="../Classes/AuthenticationInterceptor/RefreshWindow.html">RefreshWindow</a></code> will result in the <code>AuthenticationInterceptor</code> not checking for excessive refresh calls.
It is recommended to always use a <code><a href="../Classes/AuthenticationInterceptor/RefreshWindow.html">RefreshWindow</a></code> to avoid endless refresh cycles.</p>
</div>
<div class="declaration">
<h4>Declaration</h4>
<div class="language">
<p class="aside-title">Swift</p>
<pre class="highlight swift"><code><span class="kd">public</span> <span class="nf">init</span><span class="p">(</span><span class="nv">authenticator</span><span class="p">:</span> <span class="kt">AuthenticatorType</span><span class="p">,</span>
<span class="nv">credential</span><span class="p">:</span> <span class="kt"><a href="../Classes/AuthenticationInterceptor.html#/s:9Alamofire25AuthenticationInterceptorC10Credentiala">Credential</a></span><span class="p">?</span> <span class="o">=</span> <span class="kc">nil</span><span class="p">,</span>
<span class="nv">refreshWindow</span><span class="p">:</span> <span class="kt"><a href="../Classes/AuthenticationInterceptor/RefreshWindow.html">RefreshWindow</a></span><span class="p">?</span> <span class="o">=</span> <span class="kt"><a href="../Classes/AuthenticationInterceptor/RefreshWindow.html">RefreshWindow</a></span><span class="p">())</span></code></pre>
</div>
</div>
<div>
<h4>Parameters</h4>
<table class="graybox">
<tbody>
<tr>
<td>
<code>
<em>authenticator</em>
</code>
</td>
<td>
<div>
<p>The <code><a href="../Protocols/Authenticator.html">Authenticator</a></code> type.</p>
</div>
</td>
</tr>
<tr>
<td>
<code>
<em>credential</em>
</code>
</td>
<td>
<div>
<p>The <code><a href="../Classes/AuthenticationInterceptor.html#/s:9Alamofire25AuthenticationInterceptorC10Credentiala">Credential</a></code> if it exists. <code>nil</code> by default.</p>
</div>
</td>
</tr>
<tr>
<td>
<code>
<em>refreshWindow</em>
</code>
</td>
<td>
<div>
<p>The <code><a href="../Classes/AuthenticationInterceptor/RefreshWindow.html">RefreshWindow</a></code> used to identify excessive refresh calls. <code>RefreshWindow()</code> by default.</p>
</div>
</td>
</tr>
</tbody>
</table>
</div>
</section>
</div>
</li>
</ul>
</div>
<div class="task-group">
<div class="task-name-container">
<a name="/Adapt"></a>
<a name="//apple_ref/swift/Section/Adapt" class="dashAnchor"></a>
<div class="section-name-container">
<a class="section-name-link" href="#/Adapt"></a>
<h3 class="section-name"><span>Adapt</span>
</h3>
</div>
</div>
<ul class="item-container">
<li class="item">
<div>
<code>
<a name="/s:9Alamofire14RequestAdapterP5adapt_3for10completiony10Foundation10URLRequestV_AA7SessionCys6ResultOyAIs5Error_pGYbctF"></a>
<a name="//apple_ref/swift/Method/adapt(_:for:completion:)" class="dashAnchor"></a>
<a class="token" href="#/s:9Alamofire14RequestAdapterP5adapt_3for10completiony10Foundation10URLRequestV_AA7SessionCys6ResultOyAIs5Error_pGYbctF">adapt(_:<wbr>for:<wbr>completion:<wbr>)</a>
</code>
</div>
<div class="height-container">
<div class="pointer-container"></div>
<section class="section">
<div class="pointer"></div>
<div class="abstract">
</div>
<div class="declaration">
<h4>Declaration</h4>
<div class="language">
<p class="aside-title">Swift</p>
<pre class="highlight swift"><code><span class="kd">public</span> <span class="kd">func</span> <span class="nf">adapt</span><span class="p">(</span><span class="n">_</span> <span class="nv">urlRequest</span><span class="p">:</span> <span class="kt">URLRequest</span><span class="p">,</span> <span class="k">for</span> <span class="nv">session</span><span class="p">:</span> <span class="kt"><a href="../Classes/Session.html">Session</a></span><span class="p">,</span> <span class="nv">completion</span><span class="p">:</span> <span class="kd">@escaping</span> <span class="kd">@Sendable</span> <span class="p">(</span><span class="kt">Result</span><span class="o"><</span><span class="kt">URLRequest</span><span class="p">,</span> <span class="kd">any</span> <span class="kt">Error</span><span class="o">></span><span class="p">)</span> <span class="o">-></span> <span class="kt">Void</span><span class="p">)</span></code></pre>
</div>
</div>
</section>
</div>
</li>
</ul>
</div>
<div class="task-group">
<div class="task-name-container">
<a name="/Retry"></a>
<a name="//apple_ref/swift/Section/Retry" class="dashAnchor"></a>
<div class="section-name-container">
<a class="section-name-link" href="#/Retry"></a>
<h3 class="section-name"><span>Retry</span>
</h3>
</div>
</div>
<ul class="item-container">
<li class="item">
<div>
<code>
<a name="/s:9Alamofire14RequestRetrierP5retry_3for5dueTo10completionyAA0B0C_AA7SessionCs5Error_pyAA11RetryResultOYbctF"></a>
<a name="//apple_ref/swift/Method/retry(_:for:dueTo:completion:)" class="dashAnchor"></a>
<a class="token" href="#/s:9Alamofire14RequestRetrierP5retry_3for5dueTo10completionyAA0B0C_AA7SessionCs5Error_pyAA11RetryResultOYbctF">retry(_:<wbr>for:<wbr>dueTo:<wbr>completion:<wbr>)</a>
</code>
</div>
<div class="height-container">
<div class="pointer-container"></div>
<section class="section">
<div class="pointer"></div>
<div class="abstract">
</div>
<div class="declaration">
<h4>Declaration</h4>
<div class="language">
<p class="aside-title">Swift</p>
<pre class="highlight swift"><code><span class="kd">public</span> <span class="kd">func</span> <span class="nf">retry</span><span class="p">(</span><span class="n">_</span> <span class="nv">request</span><span class="p">:</span> <span class="kt"><a href="../Classes/Request.html">Request</a></span><span class="p">,</span> <span class="k">for</span> <span class="nv">session</span><span class="p">:</span> <span class="kt"><a href="../Classes/Session.html">Session</a></span><span class="p">,</span> <span class="n">dueTo</span> <span class="nv">error</span><span class="p">:</span> <span class="kd">any</span> <span class="kt">Error</span><span class="p">,</span> <span class="nv">completion</span><span class="p">:</span> <span class="kd">@escaping</span> <span class="kd">@Sendable</span> <span class="p">(</span><span class="kt"><a href="../Enums/RetryResult.html">RetryResult</a></span><span class="p">)</span> <span class="o">-></span> <span class="kt">Void</span><span class="p">)</span></code></pre>
</div>
</div>
</section>
</div>
</li>
</ul>
</div>
</div>
</section>
</article>
</div>
<section class="footer">
<p>© 2026 <a class="link" href="http://alamofire.org/" target="_blank" rel="external noopener">Alamofire Software Foundation</a>. All rights reserved. (Last updated: 2026-01-31)</p>
<p>Generated by <a class="link" href="https://github.com/realm/jazzy" target="_blank" rel="external noopener">jazzy ♪♫ v0.15.4</a>, a <a class="link" href="https://realm.io" target="_blank" rel="external noopener">Realm</a> project.</p>
</section>
</body>
</html> | html | github | https://github.com/Alamofire/Alamofire | docs/Classes/AuthenticationInterceptor.html |
#!/usr/bin/env python
import sys
import subprocess
import re
import telnetlib
HOST = "localhost"
def main():
regexIp = "\.1\.3\.6\.1\.4\.1\.8072\.2\.265\.(\d{1,5})\.(\d{1,4})\.(\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})"
if len(sys.argv) < 5:
print("error, parameter missing")
else:
if sys.argv[1] == "-s":
regexResult = re.match(regexIp, sys.argv[2])
if regexResult:
asn = regexResult.group(1)
port = regexResult.group(2)
ip = regexResult.group(3)
length = sys.argv[4]
prefix = ip + "/" + length
tn = telnetlib.Telnet(HOST, port)
tn.read_until("AS"+asn+">")
tn.write("enable\n")
tn.read_until("AS"+asn+"#")
tn.write("conf t\n")
tn.read_until("AS"+asn+"(config)#")
tn.write("router bgp "+asn+"\n")
tn.read_until("AS"+asn+"(config-router)#")
tn.write("no network "+prefix+"\n")
tn.write("exit\n")
tn.read_until("AS"+asn+"(config)#")
tn.write("exit\n")
tn.read_until("AS"+asn+"#")
tn.write("exit\n")
main() | unknown | codeparrot/codeparrot-clean | ||
/**********************************************************************
miniinit.c -
$Author$
created at: Thu Jul 11 22:09:57 JST 2013
Copyright (C) 2013 Yukihiro Matsumoto
**********************************************************************/
#include "ruby/ruby.h"
#include "ruby/encoding.h"
/* loadpath.c */
const char ruby_exec_prefix[] = "";
const char ruby_initial_load_paths[] = "";
/* localeinit.c */
VALUE
rb_locale_charmap(VALUE klass)
{
/* never used */
return Qnil;
}
int
rb_locale_charmap_index(void)
{
return -1;
}
int
Init_enc_set_filesystem_encoding(void)
{
return rb_enc_to_index(rb_default_external_encoding());
}
void rb_encdb_declare(const char *name);
int rb_encdb_alias(const char *alias, const char *orig);
void
Init_enc(void)
{
rb_encdb_declare("ASCII-8BIT");
rb_encdb_declare("US-ASCII");
rb_encdb_declare("UTF-8");
rb_encdb_alias("BINARY", "ASCII-8BIT");
rb_encdb_alias("ASCII", "US-ASCII");
}
/* miniruby does not support dynamic loading. */
void
Init_ext(void)
{
}
static void builtin_loaded(const char *feature_name, VALUE iseq);
#define BUILTIN_LOADED(feature_name, iseq) builtin_loaded(feature_name, (VALUE)(iseq))
#include "mini_builtin.c"
static struct st_table *loaded_builtin_table;
static void
builtin_loaded(const char *feature_name, VALUE iseq)
{
st_insert(loaded_builtin_table, (st_data_t)feature_name, (st_data_t)iseq);
rb_vm_register_global_object(iseq);
}
static int
each_builtin_i(st_data_t key, st_data_t val, st_data_t dmy)
{
const char *feature = (const char *)key;
const rb_iseq_t *iseq = (const rb_iseq_t *)val;
rb_yield_values(2, rb_str_new2(feature), rb_iseqw_new(iseq));
return ST_CONTINUE;
}
/* :nodoc: */
static VALUE
each_builtin(VALUE self)
{
st_foreach(loaded_builtin_table, each_builtin_i, 0);
return Qnil;
}
void
Init_builtin(void)
{
rb_define_singleton_method(rb_cRubyVM, "each_builtin", each_builtin, 0);
loaded_builtin_table = st_init_strtable();
}
void
Init_builtin_features(void)
{
// register for ruby
builtin_iseq_load("gem_prelude", NULL);
}
void
rb_free_loaded_builtin_table(void)
{
if (loaded_builtin_table)
st_free_table(loaded_builtin_table);
} | c | github | https://github.com/ruby/ruby | miniinit.c |
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
This file is part of XBMC Mega Pack Addon.
Copyright (C) 2014 Wolverine (xbmcmegapack@gmail.com)
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License along
with this program. If not, see http://www.gnu.org/licenses/gpl-3.0.html
"""
class Languages_Japanese():
'''Class that manages this specific menu context.'''
def open(self, plugin, menu):
menu.add_xplugins(plugin.get_xplugins(dictionaries=["Channels",
"Events", "Live", "Movies", "Sports", "TVShows"],
languages=["Japanese"])) | unknown | codeparrot/codeparrot-clean | ||
###############################################################################
##
## Copyright (C) 2011-2013 Tavendo GmbH
##
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
## You may obtain a copy of the License at
##
## http://www.apache.org/licenses/LICENSE-2.0
##
## Unless required by applicable law or agreed to in writing, software
## distributed under the License is distributed on an "AS IS" BASIS,
## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
## See the License for the specific language governing permissions and
## limitations under the License.
##
###############################################################################
import sys
from twisted.internet import reactor
from autobahn.twisted.websocket import WebSocketClientFactory, \
WebSocketClientProtocol, \
connectWS
class EchoClientProtocol(WebSocketClientProtocol):
def __init__(self, message):
self.message = message.encode('utf8')
def sendHello(self):
self.sendMessage(self.message)
def onOpen(self):
self.sendHello()
def onMessage(self, payload, isBinary):
if not isBinary:
print("Text message received: {}".format(payload.decode('utf8')))
reactor.callLater(1, self.sendHello)
class EchoClientFactory(WebSocketClientFactory):
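    # Each protocol instance is built with the message attached to the factory
    # (set below, after the factory is created).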
def buildProtocol(self, addr):
proto = EchoClientProtocol(self.message)
proto.factory = self
return proto
if __name__ == '__main__':
if len(sys.argv) < 2:
print "Need the WebSocket server address, i.e. ws://localhost:9000"
sys.exit(1)
factory = EchoClientFactory(sys.argv[1])
factory.message = sys.argv[2] if len(sys.argv) > 2 else "My configurable message"
connectWS(factory)
reactor.run() | unknown | codeparrot/codeparrot-clean | ||
import sys
import types
import unittest
class Test_TestLoader(unittest.TestCase):
### Tests for TestLoader.loadTestsFromTestCase
################################################################
# "Return a suite of all tests cases contained in the TestCase-derived
# class testCaseClass"
def test_loadTestsFromTestCase(self):
class Foo(unittest.TestCase):
def test_1(self): pass
def test_2(self): pass
def foo_bar(self): pass
tests = unittest.TestSuite([Foo('test_1'), Foo('test_2')])
loader = unittest.TestLoader()
self.assertEqual(loader.loadTestsFromTestCase(Foo), tests)
# "Return a suite of all tests cases contained in the TestCase-derived
# class testCaseClass"
#
# Make sure it does the right thing even if no tests were found
def test_loadTestsFromTestCase__no_matches(self):
class Foo(unittest.TestCase):
def foo_bar(self): pass
empty_suite = unittest.TestSuite()
loader = unittest.TestLoader()
self.assertEqual(loader.loadTestsFromTestCase(Foo), empty_suite)
# "Return a suite of all tests cases contained in the TestCase-derived
# class testCaseClass"
#
# What happens if loadTestsFromTestCase() is given an object
# that isn't a subclass of TestCase? Specifically, what happens
# if testCaseClass is a subclass of TestSuite?
#
# This is checked for specifically in the code, so we better add a
# test for it.
def test_loadTestsFromTestCase__TestSuite_subclass(self):
class NotATestCase(unittest.TestSuite):
pass
loader = unittest.TestLoader()
try:
loader.loadTestsFromTestCase(NotATestCase)
except TypeError:
pass
else:
self.fail('Should raise TypeError')
# "Return a suite of all tests cases contained in the TestCase-derived
# class testCaseClass"
#
# Make sure loadTestsFromTestCase() picks up the default test method
# name (as specified by TestCase), even though the method name does
# not match the default TestLoader.testMethodPrefix string
def test_loadTestsFromTestCase__default_method_name(self):
class Foo(unittest.TestCase):
def runTest(self):
pass
loader = unittest.TestLoader()
# This has to be false for the test to succeed
self.assertFalse('runTest'.startswith(loader.testMethodPrefix))
suite = loader.loadTestsFromTestCase(Foo)
self.assertIsInstance(suite, loader.suiteClass)
self.assertEqual(list(suite), [Foo('runTest')])
################################################################
### /Tests for TestLoader.loadTestsFromTestCase
### Tests for TestLoader.loadTestsFromModule
################################################################
# "This method searches `module` for classes derived from TestCase"
def test_loadTestsFromModule__TestCase_subclass(self):
m = types.ModuleType('m')
class MyTestCase(unittest.TestCase):
def test(self):
pass
m.testcase_1 = MyTestCase
loader = unittest.TestLoader()
suite = loader.loadTestsFromModule(m)
self.assertIsInstance(suite, loader.suiteClass)
expected = [loader.suiteClass([MyTestCase('test')])]
self.assertEqual(list(suite), expected)
# "This method searches `module` for classes derived from TestCase"
#
# What happens if no tests are found (no TestCase instances)?
def test_loadTestsFromModule__no_TestCase_instances(self):
m = types.ModuleType('m')
loader = unittest.TestLoader()
suite = loader.loadTestsFromModule(m)
self.assertIsInstance(suite, loader.suiteClass)
self.assertEqual(list(suite), [])
# "This method searches `module` for classes derived from TestCase"
#
# What happens if no tests are found (TestCases instances, but no tests)?
def test_loadTestsFromModule__no_TestCase_tests(self):
m = types.ModuleType('m')
class MyTestCase(unittest.TestCase):
pass
m.testcase_1 = MyTestCase
loader = unittest.TestLoader()
suite = loader.loadTestsFromModule(m)
self.assertIsInstance(suite, loader.suiteClass)
self.assertEqual(list(suite), [loader.suiteClass()])
# "This method searches `module` for classes derived from TestCase"s
#
# What happens if loadTestsFromModule() is given something other
# than a module?
#
# XXX Currently, it succeeds anyway. This flexibility
# should either be documented or loadTestsFromModule() should
# raise a TypeError
#
# XXX Certain people are using this behaviour. We'll add a test for it
def test_loadTestsFromModule__not_a_module(self):
class MyTestCase(unittest.TestCase):
def test(self):
pass
class NotAModule(object):
test_2 = MyTestCase
loader = unittest.TestLoader()
suite = loader.loadTestsFromModule(NotAModule)
reference = [unittest.TestSuite([MyTestCase('test')])]
self.assertEqual(list(suite), reference)
# Check that loadTestsFromModule honors (or not) a module
# with a load_tests function.
def test_loadTestsFromModule__load_tests(self):
m = types.ModuleType('m')
class MyTestCase(unittest.TestCase):
def test(self):
pass
m.testcase_1 = MyTestCase
load_tests_args = []
def load_tests(loader, tests, pattern):
self.assertIsInstance(tests, unittest.TestSuite)
load_tests_args.extend((loader, tests, pattern))
return tests
m.load_tests = load_tests
loader = unittest.TestLoader()
suite = loader.loadTestsFromModule(m)
self.assertIsInstance(suite, unittest.TestSuite)
self.assertEqual(load_tests_args, [loader, suite, None])
load_tests_args = []
suite = loader.loadTestsFromModule(m, use_load_tests=False)
self.assertEqual(load_tests_args, [])
def test_loadTestsFromModule__faulty_load_tests(self):
m = types.ModuleType('m')
def load_tests(loader, tests, pattern):
raise TypeError('some failure')
m.load_tests = load_tests
loader = unittest.TestLoader()
suite = loader.loadTestsFromModule(m)
self.assertIsInstance(suite, unittest.TestSuite)
self.assertEqual(suite.countTestCases(), 1)
test = list(suite)[0]
self.assertRaisesRegexp(TypeError, "some failure", test.m)
################################################################
### /Tests for TestLoader.loadTestsFromModule()
### Tests for TestLoader.loadTestsFromName()
################################################################
# "The specifier name is a ``dotted name'' that may resolve either to
# a module, a test case class, a TestSuite instance, a test method
# within a test case class, or a callable object which returns a
# TestCase or TestSuite instance."
#
# Is ValueError raised in response to an empty name?
def test_loadTestsFromName__empty_name(self):
loader = unittest.TestLoader()
try:
loader.loadTestsFromName('')
except ValueError, e:
self.assertEqual(str(e), "Empty module name")
else:
self.fail("TestLoader.loadTestsFromName failed to raise ValueError")
# "The specifier name is a ``dotted name'' that may resolve either to
# a module, a test case class, a TestSuite instance, a test method
# within a test case class, or a callable object which returns a
# TestCase or TestSuite instance."
#
# What happens when the name contains invalid characters?
def test_loadTestsFromName__malformed_name(self):
loader = unittest.TestLoader()
# XXX Should this raise ValueError or ImportError?
try:
loader.loadTestsFromName('abc () //')
except ValueError:
pass
except ImportError:
pass
else:
self.fail("TestLoader.loadTestsFromName failed to raise ValueError")
# "The specifier name is a ``dotted name'' that may resolve ... to a
# module"
#
# What happens when a module by that name can't be found?
def test_loadTestsFromName__unknown_module_name(self):
loader = unittest.TestLoader()
try:
loader.loadTestsFromName('sdasfasfasdf')
except ImportError, e:
self.assertEqual(str(e), "No module named sdasfasfasdf")
else:
self.fail("TestLoader.loadTestsFromName failed to raise ImportError")
# "The specifier name is a ``dotted name'' that may resolve either to
# a module, a test case class, a TestSuite instance, a test method
# within a test case class, or a callable object which returns a
# TestCase or TestSuite instance."
#
# What happens when the module is found, but the attribute can't?
def test_loadTestsFromName__unknown_attr_name(self):
loader = unittest.TestLoader()
try:
loader.loadTestsFromName('unittest.sdasfasfasdf')
except AttributeError, e:
self.assertEqual(str(e), "'module' object has no attribute 'sdasfasfasdf'")
else:
self.fail("TestLoader.loadTestsFromName failed to raise AttributeError")
# "The specifier name is a ``dotted name'' that may resolve either to
# a module, a test case class, a TestSuite instance, a test method
# within a test case class, or a callable object which returns a
# TestCase or TestSuite instance."
#
# What happens when we provide the module, but the attribute can't be
# found?
def test_loadTestsFromName__relative_unknown_name(self):
loader = unittest.TestLoader()
try:
loader.loadTestsFromName('sdasfasfasdf', unittest)
except AttributeError, e:
self.assertEqual(str(e), "'module' object has no attribute 'sdasfasfasdf'")
else:
self.fail("TestLoader.loadTestsFromName failed to raise AttributeError")
# "The specifier name is a ``dotted name'' that may resolve either to
# a module, a test case class, a TestSuite instance, a test method
# within a test case class, or a callable object which returns a
# TestCase or TestSuite instance."
# ...
# "The method optionally resolves name relative to the given module"
#
# Does loadTestsFromName raise ValueError when passed an empty
# name relative to a provided module?
#
# XXX Should probably raise a ValueError instead of an AttributeError
def test_loadTestsFromName__relative_empty_name(self):
loader = unittest.TestLoader()
try:
loader.loadTestsFromName('', unittest)
except AttributeError:
pass
else:
self.fail("Failed to raise AttributeError")
# "The specifier name is a ``dotted name'' that may resolve either to
# a module, a test case class, a TestSuite instance, a test method
# within a test case class, or a callable object which returns a
# TestCase or TestSuite instance."
# ...
# "The method optionally resolves name relative to the given module"
#
# What happens when an impossible name is given, relative to the provided
# `module`?
def test_loadTestsFromName__relative_malformed_name(self):
loader = unittest.TestLoader()
# XXX Should this raise AttributeError or ValueError?
try:
loader.loadTestsFromName('abc () //', unittest)
except ValueError:
pass
except AttributeError:
pass
else:
self.fail("TestLoader.loadTestsFromName failed to raise ValueError")
# "The method optionally resolves name relative to the given module"
#
# Does loadTestsFromName raise TypeError when the `module` argument
# isn't a module object?
#
# XXX Accepts the not-a-module object, ignoring the object's type
# This should raise an exception or the method name should be changed
#
# XXX Some people are relying on this, so keep it for now
def test_loadTestsFromName__relative_not_a_module(self):
class MyTestCase(unittest.TestCase):
def test(self):
pass
class NotAModule(object):
test_2 = MyTestCase
loader = unittest.TestLoader()
suite = loader.loadTestsFromName('test_2', NotAModule)
reference = [MyTestCase('test')]
self.assertEqual(list(suite), reference)
# "The specifier name is a ``dotted name'' that may resolve either to
# a module, a test case class, a TestSuite instance, a test method
# within a test case class, or a callable object which returns a
# TestCase or TestSuite instance."
#
# Does it raise an exception if the name resolves to an invalid
# object?
def test_loadTestsFromName__relative_bad_object(self):
m = types.ModuleType('m')
m.testcase_1 = object()
loader = unittest.TestLoader()
try:
loader.loadTestsFromName('testcase_1', m)
except TypeError:
pass
else:
self.fail("Should have raised TypeError")
# "The specifier name is a ``dotted name'' that may
# resolve either to ... a test case class"
def test_loadTestsFromName__relative_TestCase_subclass(self):
m = types.ModuleType('m')
class MyTestCase(unittest.TestCase):
def test(self):
pass
m.testcase_1 = MyTestCase
loader = unittest.TestLoader()
suite = loader.loadTestsFromName('testcase_1', m)
self.assertIsInstance(suite, loader.suiteClass)
self.assertEqual(list(suite), [MyTestCase('test')])
# "The specifier name is a ``dotted name'' that may resolve either to
# a module, a test case class, a TestSuite instance, a test method
# within a test case class, or a callable object which returns a
# TestCase or TestSuite instance."
def test_loadTestsFromName__relative_TestSuite(self):
m = types.ModuleType('m')
class MyTestCase(unittest.TestCase):
def test(self):
pass
m.testsuite = unittest.TestSuite([MyTestCase('test')])
loader = unittest.TestLoader()
suite = loader.loadTestsFromName('testsuite', m)
self.assertIsInstance(suite, loader.suiteClass)
self.assertEqual(list(suite), [MyTestCase('test')])
# "The specifier name is a ``dotted name'' that may resolve ... to
# ... a test method within a test case class"
def test_loadTestsFromName__relative_testmethod(self):
m = types.ModuleType('m')
class MyTestCase(unittest.TestCase):
def test(self):
pass
m.testcase_1 = MyTestCase
loader = unittest.TestLoader()
suite = loader.loadTestsFromName('testcase_1.test', m)
self.assertIsInstance(suite, loader.suiteClass)
self.assertEqual(list(suite), [MyTestCase('test')])
# "The specifier name is a ``dotted name'' that may resolve either to
# a module, a test case class, a TestSuite instance, a test method
# within a test case class, or a callable object which returns a
# TestCase or TestSuite instance."
#
# Does loadTestsFromName() raise the proper exception when trying to
# resolve "a test method within a test case class" that doesn't exist
# for the given name (relative to a provided module)?
def test_loadTestsFromName__relative_invalid_testmethod(self):
m = types.ModuleType('m')
class MyTestCase(unittest.TestCase):
def test(self):
pass
m.testcase_1 = MyTestCase
loader = unittest.TestLoader()
try:
loader.loadTestsFromName('testcase_1.testfoo', m)
except AttributeError, e:
self.assertEqual(str(e), "type object 'MyTestCase' has no attribute 'testfoo'")
else:
self.fail("Failed to raise AttributeError")
# "The specifier name is a ``dotted name'' that may resolve ... to
# ... a callable object which returns a ... TestSuite instance"
def test_loadTestsFromName__callable__TestSuite(self):
m = types.ModuleType('m')
testcase_1 = unittest.FunctionTestCase(lambda: None)
testcase_2 = unittest.FunctionTestCase(lambda: None)
def return_TestSuite():
return unittest.TestSuite([testcase_1, testcase_2])
m.return_TestSuite = return_TestSuite
loader = unittest.TestLoader()
suite = loader.loadTestsFromName('return_TestSuite', m)
self.assertIsInstance(suite, loader.suiteClass)
self.assertEqual(list(suite), [testcase_1, testcase_2])
# "The specifier name is a ``dotted name'' that may resolve ... to
# ... a callable object which returns a TestCase ... instance"
def test_loadTestsFromName__callable__TestCase_instance(self):
m = types.ModuleType('m')
testcase_1 = unittest.FunctionTestCase(lambda: None)
def return_TestCase():
return testcase_1
m.return_TestCase = return_TestCase
loader = unittest.TestLoader()
suite = loader.loadTestsFromName('return_TestCase', m)
self.assertIsInstance(suite, loader.suiteClass)
self.assertEqual(list(suite), [testcase_1])
# "The specifier name is a ``dotted name'' that may resolve ... to
# ... a callable object which returns a TestCase ... instance"
#*****************************************************************
# Override the suiteClass attribute to ensure that the suiteClass
# attribute is used
def test_loadTestsFromName__callable__TestCase_instance_ProperSuiteClass(self):
class SubTestSuite(unittest.TestSuite):
pass
m = types.ModuleType('m')
testcase_1 = unittest.FunctionTestCase(lambda: None)
def return_TestCase():
return testcase_1
m.return_TestCase = return_TestCase
loader = unittest.TestLoader()
loader.suiteClass = SubTestSuite
suite = loader.loadTestsFromName('return_TestCase', m)
self.assertIsInstance(suite, loader.suiteClass)
self.assertEqual(list(suite), [testcase_1])
# "The specifier name is a ``dotted name'' that may resolve ... to
# ... a test method within a test case class"
#*****************************************************************
# Override the suiteClass attribute to ensure that the suiteClass
# attribute is used
def test_loadTestsFromName__relative_testmethod_ProperSuiteClass(self):
class SubTestSuite(unittest.TestSuite):
pass
m = types.ModuleType('m')
class MyTestCase(unittest.TestCase):
def test(self):
pass
m.testcase_1 = MyTestCase
loader = unittest.TestLoader()
loader.suiteClass = SubTestSuite
suite = loader.loadTestsFromName('testcase_1.test', m)
self.assertIsInstance(suite, loader.suiteClass)
self.assertEqual(list(suite), [MyTestCase('test')])
# "The specifier name is a ``dotted name'' that may resolve ... to
# ... a callable object which returns a TestCase or TestSuite instance"
#
# What happens if the callable returns something else?
def test_loadTestsFromName__callable__wrong_type(self):
m = types.ModuleType('m')
def return_wrong():
return 6
m.return_wrong = return_wrong
loader = unittest.TestLoader()
try:
loader.loadTestsFromName('return_wrong', m)
except TypeError:
pass
else:
self.fail("TestLoader.loadTestsFromName failed to raise TypeError")
# "The specifier can refer to modules and packages which have not been
# imported; they will be imported as a side-effect"
def test_loadTestsFromName__module_not_loaded(self):
# We're going to try to load this module as a side-effect, so it
# better not be loaded before we try.
#
module_name = 'unittest.test.dummy'
sys.modules.pop(module_name, None)
loader = unittest.TestLoader()
try:
suite = loader.loadTestsFromName(module_name)
self.assertIsInstance(suite, loader.suiteClass)
self.assertEqual(list(suite), [])
# module should now be loaded, thanks to loadTestsFromName()
self.assertIn(module_name, sys.modules)
finally:
if module_name in sys.modules:
del sys.modules[module_name]
################################################################
### /Tests for TestLoader.loadTestsFromName()
### Tests for TestLoader.loadTestsFromNames()
################################################################
# "Similar to loadTestsFromName(), but takes a sequence of names rather
# than a single name."
#
# What happens if that sequence of names is empty?
def test_loadTestsFromNames__empty_name_list(self):
loader = unittest.TestLoader()
suite = loader.loadTestsFromNames([])
self.assertIsInstance(suite, loader.suiteClass)
self.assertEqual(list(suite), [])
# "Similar to loadTestsFromName(), but takes a sequence of names rather
# than a single name."
# ...
# "The method optionally resolves name relative to the given module"
#
# What happens if that sequence of names is empty?
#
# XXX Should this raise a ValueError or just return an empty TestSuite?
def test_loadTestsFromNames__relative_empty_name_list(self):
loader = unittest.TestLoader()
suite = loader.loadTestsFromNames([], unittest)
self.assertIsInstance(suite, loader.suiteClass)
self.assertEqual(list(suite), [])
# "The specifier name is a ``dotted name'' that may resolve either to
# a module, a test case class, a TestSuite instance, a test method
# within a test case class, or a callable object which returns a
# TestCase or TestSuite instance."
#
# Is ValueError raised in response to an empty name?
def test_loadTestsFromNames__empty_name(self):
loader = unittest.TestLoader()
try:
loader.loadTestsFromNames([''])
except ValueError, e:
self.assertEqual(str(e), "Empty module name")
else:
self.fail("TestLoader.loadTestsFromNames failed to raise ValueError")
# "The specifier name is a ``dotted name'' that may resolve either to
# a module, a test case class, a TestSuite instance, a test method
# within a test case class, or a callable object which returns a
# TestCase or TestSuite instance."
#
# What happens when presented with an impossible module name?
def test_loadTestsFromNames__malformed_name(self):
loader = unittest.TestLoader()
# XXX Should this raise ValueError or ImportError?
try:
loader.loadTestsFromNames(['abc () //'])
except ValueError:
pass
except ImportError:
pass
else:
self.fail("TestLoader.loadTestsFromNames failed to raise ValueError")
# "The specifier name is a ``dotted name'' that may resolve either to
# a module, a test case class, a TestSuite instance, a test method
# within a test case class, or a callable object which returns a
# TestCase or TestSuite instance."
#
# What happens when no module can be found for the given name?
def test_loadTestsFromNames__unknown_module_name(self):
loader = unittest.TestLoader()
try:
loader.loadTestsFromNames(['sdasfasfasdf'])
except ImportError, e:
self.assertEqual(str(e), "No module named sdasfasfasdf")
else:
self.fail("TestLoader.loadTestsFromNames failed to raise ImportError")
# "The specifier name is a ``dotted name'' that may resolve either to
# a module, a test case class, a TestSuite instance, a test method
# within a test case class, or a callable object which returns a
# TestCase or TestSuite instance."
#
# What happens when the module can be found, but not the attribute?
def test_loadTestsFromNames__unknown_attr_name(self):
loader = unittest.TestLoader()
try:
loader.loadTestsFromNames(['unittest.sdasfasfasdf', 'unittest'])
except AttributeError, e:
self.assertEqual(str(e), "'module' object has no attribute 'sdasfasfasdf'")
else:
self.fail("TestLoader.loadTestsFromNames failed to raise AttributeError")
# "The specifier name is a ``dotted name'' that may resolve either to
# a module, a test case class, a TestSuite instance, a test method
# within a test case class, or a callable object which returns a
# TestCase or TestSuite instance."
# ...
# "The method optionally resolves name relative to the given module"
#
# What happens when given an unknown attribute on a specified `module`
# argument?
def test_loadTestsFromNames__unknown_name_relative_1(self):
loader = unittest.TestLoader()
try:
loader.loadTestsFromNames(['sdasfasfasdf'], unittest)
except AttributeError, e:
self.assertEqual(str(e), "'module' object has no attribute 'sdasfasfasdf'")
else:
self.fail("TestLoader.loadTestsFromName failed to raise AttributeError")
# "The specifier name is a ``dotted name'' that may resolve either to
# a module, a test case class, a TestSuite instance, a test method
# within a test case class, or a callable object which returns a
# TestCase or TestSuite instance."
# ...
# "The method optionally resolves name relative to the given module"
#
# Do unknown attributes (relative to a provided module) still raise an
# exception even in the presence of valid attribute names?
def test_loadTestsFromNames__unknown_name_relative_2(self):
loader = unittest.TestLoader()
try:
loader.loadTestsFromNames(['TestCase', 'sdasfasfasdf'], unittest)
except AttributeError, e:
self.assertEqual(str(e), "'module' object has no attribute 'sdasfasfasdf'")
else:
self.fail("TestLoader.loadTestsFromName failed to raise AttributeError")
# "The specifier name is a ``dotted name'' that may resolve either to
# a module, a test case class, a TestSuite instance, a test method
# within a test case class, or a callable object which returns a
# TestCase or TestSuite instance."
# ...
# "The method optionally resolves name relative to the given module"
#
# What happens when faced with the empty string?
#
# XXX This currently raises AttributeError, though ValueError is probably
# more appropriate
def test_loadTestsFromNames__relative_empty_name(self):
loader = unittest.TestLoader()
try:
loader.loadTestsFromNames([''], unittest)
except AttributeError:
pass
else:
self.fail("Failed to raise ValueError")
# "The specifier name is a ``dotted name'' that may resolve either to
# a module, a test case class, a TestSuite instance, a test method
# within a test case class, or a callable object which returns a
# TestCase or TestSuite instance."
# ...
# "The method optionally resolves name relative to the given module"
#
# What happens when presented with an impossible attribute name?
def test_loadTestsFromNames__relative_malformed_name(self):
loader = unittest.TestLoader()
# XXX Should this raise AttributeError or ValueError?
try:
loader.loadTestsFromNames(['abc () //'], unittest)
except AttributeError:
pass
except ValueError:
pass
else:
self.fail("TestLoader.loadTestsFromNames failed to raise ValueError")
# "The method optionally resolves name relative to the given module"
#
# Does loadTestsFromNames() make sure the provided `module` is in fact
# a module?
#
# XXX This validation is currently not done. This flexibility should
# either be documented or a TypeError should be raised.
def test_loadTestsFromNames__relative_not_a_module(self):
class MyTestCase(unittest.TestCase):
def test(self):
pass
class NotAModule(object):
test_2 = MyTestCase
loader = unittest.TestLoader()
suite = loader.loadTestsFromNames(['test_2'], NotAModule)
reference = [unittest.TestSuite([MyTestCase('test')])]
self.assertEqual(list(suite), reference)
# "The specifier name is a ``dotted name'' that may resolve either to
# a module, a test case class, a TestSuite instance, a test method
# within a test case class, or a callable object which returns a
# TestCase or TestSuite instance."
#
# Does it raise an exception if the name resolves to an invalid
# object?
def test_loadTestsFromNames__relative_bad_object(self):
m = types.ModuleType('m')
m.testcase_1 = object()
loader = unittest.TestLoader()
try:
loader.loadTestsFromNames(['testcase_1'], m)
except TypeError:
pass
else:
self.fail("Should have raised TypeError")
# "The specifier name is a ``dotted name'' that may resolve ... to
# ... a test case class"
def test_loadTestsFromNames__relative_TestCase_subclass(self):
m = types.ModuleType('m')
class MyTestCase(unittest.TestCase):
def test(self):
pass
m.testcase_1 = MyTestCase
loader = unittest.TestLoader()
suite = loader.loadTestsFromNames(['testcase_1'], m)
self.assertIsInstance(suite, loader.suiteClass)
expected = loader.suiteClass([MyTestCase('test')])
self.assertEqual(list(suite), [expected])
# "The specifier name is a ``dotted name'' that may resolve ... to
# ... a TestSuite instance"
def test_loadTestsFromNames__relative_TestSuite(self):
m = types.ModuleType('m')
class MyTestCase(unittest.TestCase):
def test(self):
pass
m.testsuite = unittest.TestSuite([MyTestCase('test')])
loader = unittest.TestLoader()
suite = loader.loadTestsFromNames(['testsuite'], m)
self.assertIsInstance(suite, loader.suiteClass)
self.assertEqual(list(suite), [m.testsuite])
# "The specifier name is a ``dotted name'' that may resolve ... to ... a
# test method within a test case class"
def test_loadTestsFromNames__relative_testmethod(self):
m = types.ModuleType('m')
class MyTestCase(unittest.TestCase):
def test(self):
pass
m.testcase_1 = MyTestCase
loader = unittest.TestLoader()
suite = loader.loadTestsFromNames(['testcase_1.test'], m)
self.assertIsInstance(suite, loader.suiteClass)
ref_suite = unittest.TestSuite([MyTestCase('test')])
self.assertEqual(list(suite), [ref_suite])
# "The specifier name is a ``dotted name'' that may resolve ... to ... a
# test method within a test case class"
#
# Does the method gracefully handle names that initially look like they
# resolve to "a test method within a test case class" but don't?
def test_loadTestsFromNames__relative_invalid_testmethod(self):
m = types.ModuleType('m')
class MyTestCase(unittest.TestCase):
def test(self):
pass
m.testcase_1 = MyTestCase
loader = unittest.TestLoader()
try:
loader.loadTestsFromNames(['testcase_1.testfoo'], m)
except AttributeError, e:
self.assertEqual(str(e), "type object 'MyTestCase' has no attribute 'testfoo'")
else:
self.fail("Failed to raise AttributeError")
# "The specifier name is a ``dotted name'' that may resolve ... to
# ... a callable object which returns a ... TestSuite instance"
def test_loadTestsFromNames__callable__TestSuite(self):
m = types.ModuleType('m')
testcase_1 = unittest.FunctionTestCase(lambda: None)
testcase_2 = unittest.FunctionTestCase(lambda: None)
def return_TestSuite():
return unittest.TestSuite([testcase_1, testcase_2])
m.return_TestSuite = return_TestSuite
loader = unittest.TestLoader()
suite = loader.loadTestsFromNames(['return_TestSuite'], m)
self.assertIsInstance(suite, loader.suiteClass)
expected = unittest.TestSuite([testcase_1, testcase_2])
self.assertEqual(list(suite), [expected])
# "The specifier name is a ``dotted name'' that may resolve ... to
# ... a callable object which returns a TestCase ... instance"
def test_loadTestsFromNames__callable__TestCase_instance(self):
m = types.ModuleType('m')
testcase_1 = unittest.FunctionTestCase(lambda: None)
def return_TestCase():
return testcase_1
m.return_TestCase = return_TestCase
loader = unittest.TestLoader()
suite = loader.loadTestsFromNames(['return_TestCase'], m)
self.assertIsInstance(suite, loader.suiteClass)
ref_suite = unittest.TestSuite([testcase_1])
self.assertEqual(list(suite), [ref_suite])
# "The specifier name is a ``dotted name'' that may resolve ... to
# ... a callable object which returns a TestCase or TestSuite instance"
#
# Are staticmethods handled correctly?
def test_loadTestsFromNames__callable__call_staticmethod(self):
m = types.ModuleType('m')
class Test1(unittest.TestCase):
def test(self):
pass
testcase_1 = Test1('test')
class Foo(unittest.TestCase):
@staticmethod
def foo():
return testcase_1
m.Foo = Foo
loader = unittest.TestLoader()
suite = loader.loadTestsFromNames(['Foo.foo'], m)
self.assertIsInstance(suite, loader.suiteClass)
ref_suite = unittest.TestSuite([testcase_1])
self.assertEqual(list(suite), [ref_suite])
# "The specifier name is a ``dotted name'' that may resolve ... to
# ... a callable object which returns a TestCase or TestSuite instance"
#
# What happens when the callable returns something else?
def test_loadTestsFromNames__callable__wrong_type(self):
m = types.ModuleType('m')
def return_wrong():
return 6
m.return_wrong = return_wrong
loader = unittest.TestLoader()
try:
loader.loadTestsFromNames(['return_wrong'], m)
except TypeError:
pass
else:
self.fail("TestLoader.loadTestsFromNames failed to raise TypeError")
# "The specifier can refer to modules and packages which have not been
# imported; they will be imported as a side-effect"
def test_loadTestsFromNames__module_not_loaded(self):
# We're going to try to load this module as a side-effect, so it
# better not be loaded before we try.
#
module_name = 'unittest.test.dummy'
sys.modules.pop(module_name, None)
loader = unittest.TestLoader()
try:
suite = loader.loadTestsFromNames([module_name])
self.assertIsInstance(suite, loader.suiteClass)
self.assertEqual(list(suite), [unittest.TestSuite()])
# module should now be loaded, thanks to loadTestsFromNames()
self.assertIn(module_name, sys.modules)
finally:
if module_name in sys.modules:
del sys.modules[module_name]
################################################################
### /Tests for TestLoader.loadTestsFromNames()
### Tests for TestLoader.getTestCaseNames()
################################################################
# "Return a sorted sequence of method names found within testCaseClass"
#
# Test.foobar is defined to make sure getTestCaseNames() respects
# loader.testMethodPrefix
def test_getTestCaseNames(self):
class Test(unittest.TestCase):
def test_1(self): pass
def test_2(self): pass
def foobar(self): pass
loader = unittest.TestLoader()
self.assertEqual(loader.getTestCaseNames(Test), ['test_1', 'test_2'])
# "Return a sorted sequence of method names found within testCaseClass"
#
# Does getTestCaseNames() behave appropriately if no tests are found?
def test_getTestCaseNames__no_tests(self):
class Test(unittest.TestCase):
def foobar(self): pass
loader = unittest.TestLoader()
self.assertEqual(loader.getTestCaseNames(Test), [])
# "Return a sorted sequence of method names found within testCaseClass"
#
# Are not-TestCases handled gracefully?
#
# XXX This should raise a TypeError, not return a list
#
# XXX It's too late in the 2.5 release cycle to fix this, but it should
# probably be revisited for 2.6
def test_getTestCaseNames__not_a_TestCase(self):
class BadCase(int):
def test_foo(self):
pass
loader = unittest.TestLoader()
names = loader.getTestCaseNames(BadCase)
self.assertEqual(names, ['test_foo'])
# "Return a sorted sequence of method names found within testCaseClass"
#
# Make sure inherited names are handled.
#
# TestP.foobar is defined to make sure getTestCaseNames() respects
# loader.testMethodPrefix
def test_getTestCaseNames__inheritance(self):
class TestP(unittest.TestCase):
def test_1(self): pass
def test_2(self): pass
def foobar(self): pass
class TestC(TestP):
def test_1(self): pass
def test_3(self): pass
loader = unittest.TestLoader()
names = ['test_1', 'test_2', 'test_3']
self.assertEqual(loader.getTestCaseNames(TestC), names)
################################################################
### /Tests for TestLoader.getTestCaseNames()
### Tests for TestLoader.testMethodPrefix
################################################################
# "String giving the prefix of method names which will be interpreted as
# test methods"
#
# Implicit in the documentation is that testMethodPrefix is respected by
# all loadTestsFrom* methods.
def test_testMethodPrefix__loadTestsFromTestCase(self):
class Foo(unittest.TestCase):
def test_1(self): pass
def test_2(self): pass
def foo_bar(self): pass
tests_1 = unittest.TestSuite([Foo('foo_bar')])
tests_2 = unittest.TestSuite([Foo('test_1'), Foo('test_2')])
loader = unittest.TestLoader()
loader.testMethodPrefix = 'foo'
self.assertEqual(loader.loadTestsFromTestCase(Foo), tests_1)
loader.testMethodPrefix = 'test'
self.assertEqual(loader.loadTestsFromTestCase(Foo), tests_2)
# "String giving the prefix of method names which will be interpreted as
# test methods"
#
# Implicit in the documentation is that testMethodPrefix is respected by
# all loadTestsFrom* methods.
def test_testMethodPrefix__loadTestsFromModule(self):
m = types.ModuleType('m')
class Foo(unittest.TestCase):
def test_1(self): pass
def test_2(self): pass
def foo_bar(self): pass
m.Foo = Foo
tests_1 = [unittest.TestSuite([Foo('foo_bar')])]
tests_2 = [unittest.TestSuite([Foo('test_1'), Foo('test_2')])]
loader = unittest.TestLoader()
loader.testMethodPrefix = 'foo'
self.assertEqual(list(loader.loadTestsFromModule(m)), tests_1)
loader.testMethodPrefix = 'test'
self.assertEqual(list(loader.loadTestsFromModule(m)), tests_2)
# "String giving the prefix of method names which will be interpreted as
# test methods"
#
# Implicit in the documentation is that testMethodPrefix is respected by
# all loadTestsFrom* methods.
def test_testMethodPrefix__loadTestsFromName(self):
m = types.ModuleType('m')
class Foo(unittest.TestCase):
def test_1(self): pass
def test_2(self): pass
def foo_bar(self): pass
m.Foo = Foo
tests_1 = unittest.TestSuite([Foo('foo_bar')])
tests_2 = unittest.TestSuite([Foo('test_1'), Foo('test_2')])
loader = unittest.TestLoader()
loader.testMethodPrefix = 'foo'
self.assertEqual(loader.loadTestsFromName('Foo', m), tests_1)
loader.testMethodPrefix = 'test'
self.assertEqual(loader.loadTestsFromName('Foo', m), tests_2)
# "String giving the prefix of method names which will be interpreted as
# test methods"
#
# Implicit in the documentation is that testMethodPrefix is respected by
# all loadTestsFrom* methods.
def test_testMethodPrefix__loadTestsFromNames(self):
m = types.ModuleType('m')
class Foo(unittest.TestCase):
def test_1(self): pass
def test_2(self): pass
def foo_bar(self): pass
m.Foo = Foo
tests_1 = unittest.TestSuite([unittest.TestSuite([Foo('foo_bar')])])
tests_2 = unittest.TestSuite([Foo('test_1'), Foo('test_2')])
tests_2 = unittest.TestSuite([tests_2])
loader = unittest.TestLoader()
loader.testMethodPrefix = 'foo'
self.assertEqual(loader.loadTestsFromNames(['Foo'], m), tests_1)
loader.testMethodPrefix = 'test'
self.assertEqual(loader.loadTestsFromNames(['Foo'], m), tests_2)
# "The default value is 'test'"
def test_testMethodPrefix__default_value(self):
loader = unittest.TestLoader()
self.assertTrue(loader.testMethodPrefix == 'test')
################################################################
### /Tests for TestLoader.testMethodPrefix
### Tests for TestLoader.sortTestMethodsUsing
################################################################
# "Function to be used to compare method names when sorting them in
# getTestCaseNames() and all the loadTestsFromX() methods"
def test_sortTestMethodsUsing__loadTestsFromTestCase(self):
def reversed_cmp(x, y):
return -cmp(x, y)
class Foo(unittest.TestCase):
def test_1(self): pass
def test_2(self): pass
loader = unittest.TestLoader()
loader.sortTestMethodsUsing = reversed_cmp
tests = loader.suiteClass([Foo('test_2'), Foo('test_1')])
self.assertEqual(loader.loadTestsFromTestCase(Foo), tests)
# "Function to be used to compare method names when sorting them in
# getTestCaseNames() and all the loadTestsFromX() methods"
def test_sortTestMethodsUsing__loadTestsFromModule(self):
def reversed_cmp(x, y):
return -cmp(x, y)
m = types.ModuleType('m')
class Foo(unittest.TestCase):
def test_1(self): pass
def test_2(self): pass
m.Foo = Foo
loader = unittest.TestLoader()
loader.sortTestMethodsUsing = reversed_cmp
tests = [loader.suiteClass([Foo('test_2'), Foo('test_1')])]
self.assertEqual(list(loader.loadTestsFromModule(m)), tests)
# "Function to be used to compare method names when sorting them in
# getTestCaseNames() and all the loadTestsFromX() methods"
def test_sortTestMethodsUsing__loadTestsFromName(self):
def reversed_cmp(x, y):
return -cmp(x, y)
m = types.ModuleType('m')
class Foo(unittest.TestCase):
def test_1(self): pass
def test_2(self): pass
m.Foo = Foo
loader = unittest.TestLoader()
loader.sortTestMethodsUsing = reversed_cmp
tests = loader.suiteClass([Foo('test_2'), Foo('test_1')])
self.assertEqual(loader.loadTestsFromName('Foo', m), tests)
# "Function to be used to compare method names when sorting them in
# getTestCaseNames() and all the loadTestsFromX() methods"
def test_sortTestMethodsUsing__loadTestsFromNames(self):
def reversed_cmp(x, y):
return -cmp(x, y)
m = types.ModuleType('m')
class Foo(unittest.TestCase):
def test_1(self): pass
def test_2(self): pass
m.Foo = Foo
loader = unittest.TestLoader()
loader.sortTestMethodsUsing = reversed_cmp
tests = [loader.suiteClass([Foo('test_2'), Foo('test_1')])]
self.assertEqual(list(loader.loadTestsFromNames(['Foo'], m)), tests)
# "Function to be used to compare method names when sorting them in
# getTestCaseNames()"
#
# Does it actually affect getTestCaseNames()?
def test_sortTestMethodsUsing__getTestCaseNames(self):
def reversed_cmp(x, y):
return -cmp(x, y)
class Foo(unittest.TestCase):
def test_1(self): pass
def test_2(self): pass
loader = unittest.TestLoader()
loader.sortTestMethodsUsing = reversed_cmp
test_names = ['test_2', 'test_1']
self.assertEqual(loader.getTestCaseNames(Foo), test_names)
# "The default value is the built-in cmp() function"
def test_sortTestMethodsUsing__default_value(self):
loader = unittest.TestLoader()
self.assertTrue(loader.sortTestMethodsUsing is cmp)
# "it can be set to None to disable the sort."
#
# XXX How is this different from reassigning cmp? Are the tests returned
# in a random order or something? This behaviour should die
def test_sortTestMethodsUsing__None(self):
class Foo(unittest.TestCase):
def test_1(self): pass
def test_2(self): pass
loader = unittest.TestLoader()
loader.sortTestMethodsUsing = None
test_names = ['test_2', 'test_1']
self.assertEqual(set(loader.getTestCaseNames(Foo)), set(test_names))
################################################################
### /Tests for TestLoader.sortTestMethodsUsing
### Tests for TestLoader.suiteClass
################################################################
# "Callable object that constructs a test suite from a list of tests."
def test_suiteClass__loadTestsFromTestCase(self):
class Foo(unittest.TestCase):
def test_1(self): pass
def test_2(self): pass
def foo_bar(self): pass
tests = [Foo('test_1'), Foo('test_2')]
loader = unittest.TestLoader()
loader.suiteClass = list
self.assertEqual(loader.loadTestsFromTestCase(Foo), tests)
# It is implicit in the documentation for TestLoader.suiteClass that
# all TestLoader.loadTestsFrom* methods respect it. Let's make sure
def test_suiteClass__loadTestsFromModule(self):
m = types.ModuleType('m')
class Foo(unittest.TestCase):
def test_1(self): pass
def test_2(self): pass
def foo_bar(self): pass
m.Foo = Foo
tests = [[Foo('test_1'), Foo('test_2')]]
loader = unittest.TestLoader()
loader.suiteClass = list
self.assertEqual(loader.loadTestsFromModule(m), tests)
# It is implicit in the documentation for TestLoader.suiteClass that
# all TestLoader.loadTestsFrom* methods respect it. Let's make sure
def test_suiteClass__loadTestsFromName(self):
m = types.ModuleType('m')
class Foo(unittest.TestCase):
def test_1(self): pass
def test_2(self): pass
def foo_bar(self): pass
m.Foo = Foo
tests = [Foo('test_1'), Foo('test_2')]
loader = unittest.TestLoader()
loader.suiteClass = list
self.assertEqual(loader.loadTestsFromName('Foo', m), tests)
# It is implicit in the documentation for TestLoader.suiteClass that
# all TestLoader.loadTestsFrom* methods respect it. Let's make sure
def test_suiteClass__loadTestsFromNames(self):
m = types.ModuleType('m')
class Foo(unittest.TestCase):
def test_1(self): pass
def test_2(self): pass
def foo_bar(self): pass
m.Foo = Foo
tests = [[Foo('test_1'), Foo('test_2')]]
loader = unittest.TestLoader()
loader.suiteClass = list
self.assertEqual(loader.loadTestsFromNames(['Foo'], m), tests)
# "The default value is the TestSuite class"
def test_suiteClass__default_value(self):
loader = unittest.TestLoader()
self.assertTrue(loader.suiteClass is unittest.TestSuite)
if __name__ == '__main__':
unittest.main() | unknown | codeparrot/codeparrot-clean | ||
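The test_loadTestsFromModule__load_tests case above exercises the load_tests protocol: a module-level load_tests(loader, standard_tests, pattern) hook that the loader calls (with pattern None in this path) and whose return value becomes the suite. A minimal sketch of such a hook, with illustrative names that do not appear in the tests above:
import unittest

class ExampleTest(unittest.TestCase):
    def test_something(self):
        self.assertTrue(True)

def load_tests(loader, standard_tests, pattern):
    # Start from the tests the loader already collected, then add more.
    suite = unittest.TestSuite()
    suite.addTests(standard_tests)
    suite.addTests(loader.loadTestsFromTestCase(ExampleTest))
    return suite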
# -----------------------------------------------------------------------------
# yacc_prec1.py
#
# Tests case where precedence specifier doesn't match up to terminals
# -----------------------------------------------------------------------------
import sys
if ".." not in sys.path: sys.path.insert(0,"..")
import ply.yacc as yacc
from calclex import tokens
# Parsing rules
precedence = (
('left','+','-'),
('left','*','/'),
('right','UMINUS'),
)
# dictionary of names
names = { }
def p_statement_assign(t):
'statement : NAME EQUALS expression'
names[t[1]] = t[3]
def p_statement_expr(t):
'statement : expression'
print(t[1])
def p_expression_binop(t):
'''expression : expression PLUS expression
| expression MINUS expression
| expression TIMES expression
| expression DIVIDE expression'''
if t[2] == '+' : t[0] = t[1] + t[3]
elif t[2] == '-': t[0] = t[1] - t[3]
elif t[2] == '*': t[0] = t[1] * t[3]
elif t[2] == '/': t[0] = t[1] / t[3]
def p_expression_uminus(t):
'expression : MINUS expression %prec UMINUS'
t[0] = -t[2]
def p_expression_group(t):
'expression : LPAREN expression RPAREN'
t[0] = t[2]
def p_expression_number(t):
'expression : NUMBER'
t[0] = t[1]
def p_expression_name(t):
'expression : NAME'
try:
t[0] = names[t[1]]
except LookupError:
print("Undefined name '%s'" % t[1])
t[0] = 0
def p_error(t):
print("Syntax error at '%s'" % t.value)
yacc.yacc() | unknown | codeparrot/codeparrot-clean | ||
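yacc_prec1.py above deliberately lists the literal characters '+', '-', '*' and '/' in the precedence table while the grammar rules use the PLUS/MINUS/TIMES/DIVIDE terminals from calclex, so yacc is expected to warn that the precedence entries do not correspond to any terminal. As a hedged sketch (not part of the test itself), a table that matches the grammar's terminals would instead read:
precedence = (
    ('left', 'PLUS', 'MINUS'),
    ('left', 'TIMES', 'DIVIDE'),
    ('right', 'UMINUS'),
)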
# -*- coding: utf-8 -*-
# Copyright (c) 2019 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
import pytest
from ansible.module_utils._text import to_native
from ansible.plugins.filter.core import to_uuid
from ansible.errors import AnsibleFilterError
UUID_DEFAULT_NAMESPACE_TEST_CASES = (
('example.com', 'ae780c3a-a3ab-53c2-bfb4-098da300b3fe'),
('test.example', '8e437a35-c7c5-50ea-867c-5c254848dbc2'),
('café.example', '8a99d6b1-fb8f-5f78-af86-879768589f56'),
)
UUID_TEST_CASES = (
('361E6D51-FAEC-444A-9079-341386DA8E2E', 'example.com', 'ae780c3a-a3ab-53c2-bfb4-098da300b3fe'),
('361E6D51-FAEC-444A-9079-341386DA8E2E', 'test.example', '8e437a35-c7c5-50ea-867c-5c254848dbc2'),
('11111111-2222-3333-4444-555555555555', 'example.com', 'e776faa5-5299-55dc-9057-7a00e6be2364'),
)
@pytest.mark.parametrize('value, expected', UUID_DEFAULT_NAMESPACE_TEST_CASES)
def test_to_uuid_default_namespace(value, expected):
assert expected == to_uuid(value)
@pytest.mark.parametrize('namespace, value, expected', UUID_TEST_CASES)
def test_to_uuid(namespace, value, expected):
assert expected == to_uuid(value, namespace=namespace)
def test_to_uuid_invalid_namespace():
with pytest.raises(AnsibleFilterError) as e:
to_uuid('example.com', namespace='11111111-2222-3333-4444-555555555')
assert 'Invalid value' in to_native(e.value) | unknown | codeparrot/codeparrot-clean | ||
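The fixtures above pair the same inputs with and without an explicit namespace, which suggests to_uuid is a name-based (version 5) UUID keyed on the namespace listed in UUID_TEST_CASES; under that assumption the expected values can be reproduced with the standard uuid module. A minimal sketch:
import uuid

namespace = uuid.UUID('361E6D51-FAEC-444A-9079-341386DA8E2E')
derived = uuid.uuid5(namespace, 'example.com')
# UUID_TEST_CASES above expects 'ae780c3a-a3ab-53c2-bfb4-098da300b3fe' for this pair.
print(derived)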
# coding: utf-8
"""
Onshape REST API
The Onshape REST API consumed by all clients. # noqa: E501
The version of the OpenAPI document: 1.113
Contact: api-support@onshape.zendesk.com
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import re # noqa: F401
import sys # noqa: F401
import six # noqa: F401
import nulltype # noqa: F401
from onshape_client.oas.model_utils import ( # noqa: F401
ModelComposed,
ModelNormal,
ModelSimple,
date,
datetime,
file_type,
int,
none_type,
str,
validate_get_composed_info,
)
class BTMParameterQuantity147AllOf(ModelNormal):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
and, for var_name, this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
discriminator_value_class_map (dict): A dict to go from the discriminator
variable value to the discriminator class name.
validations (dict): The key is the tuple path to the attribute
and, for var_name, this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {}
validations = {}
additional_properties_type = None
@staticmethod
def openapi_types():
"""
This must be a class method so a model may have properties that are
of type self; this ensures that we don't create a cyclic import
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
return {
"bt_type": (str,), # noqa: E501
"expression": (str,), # noqa: E501
"is_integer": (bool,), # noqa: E501
"units": (str,), # noqa: E501
"value": (float,), # noqa: E501
}
@staticmethod
def discriminator():
return None
attribute_map = {
"bt_type": "btType", # noqa: E501
"expression": "expression", # noqa: E501
"is_integer": "isInteger", # noqa: E501
"units": "units", # noqa: E501
"value": "value", # noqa: E501
}
@staticmethod
def _composed_schemas():
return None
required_properties = set(
[
"_data_store",
"_check_type",
"_from_server",
"_path_to_item",
"_configuration",
]
)
def __init__(
self,
_check_type=True,
_from_server=False,
_path_to_item=(),
_configuration=None,
**kwargs
): # noqa: E501
"""btm_parameter_quantity147_all_of.BTMParameterQuantity147AllOf - a model defined in OpenAPI
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_from_server (bool): True if the data is from the server
False if the data is from the client (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
bt_type (str): [optional] # noqa: E501
expression (str): [optional] # noqa: E501
is_integer (bool): [optional] # noqa: E501
units (str): [optional] # noqa: E501
value (float): [optional] # noqa: E501
"""
self._data_store = {}
self._check_type = _check_type
self._from_server = _from_server
self._path_to_item = _path_to_item
self._configuration = _configuration
for var_name, var_value in six.iteritems(kwargs):
if (
var_name not in self.attribute_map
and self._configuration is not None
and self._configuration.discard_unknown_keys
and self.additional_properties_type is None
):
# discard variable.
continue
setattr(self, var_name, var_value) | unknown | codeparrot/codeparrot-clean | ||
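A hedged usage sketch for the generated model above: every documented keyword argument is optional, so an instance can be built directly from plain values. The values below are illustrative only, not values taken from the Onshape API.
quantity = BTMParameterQuantity147AllOf(
    expression="5 mm",   # illustrative expression string
    is_integer=False,
    units="millimeter",
    value=5.0,
)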
#!/usr/bin/env bash
#
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
set -e
if [ -z "$IOS_CMAKE_TOOLCHAIN_FILE" ]
then
echo "IOS_CMAKE_TOOLCHAIN_FILE not set; please set it to path of CMake toolchain file for iOS"
exit 1
fi
if [ ! -f "$IOS_CMAKE_TOOLCHAIN_FILE" ]
then
echo "IOS_CMAKE_TOOLCHAIN_FILE not a file path; did you properly setup ${IOS_CMAKE_TOOLCHAIN_FILE}?"
exit 1
fi
mkdir -p build/ios/armv7s
CMAKE_ARGS=()
# CMake-level configuration
CMAKE_ARGS+=("-DCMAKE_TOOLCHAIN_FILE=$IOS_CMAKE_TOOLCHAIN_FILE")
CMAKE_ARGS+=("-DCMAKE_BUILD_TYPE=Release")
CMAKE_ARGS+=("-DCMAKE_POSITION_INDEPENDENT_CODE=ON")
# QNNPACK-specific options
CMAKE_ARGS+=("-DPYTORCH_QNNPACK_LIBRARY_TYPE=static")
CMAKE_ARGS+=("-DPYTORCH_QNNPACK_BUILD_BENCHMARKS=OFF")
CMAKE_ARGS+=("-DPYTORCH_QNNPACK_BUILD_TESTS=OFF")
# iOS-specific options
CMAKE_ARGS+=("-DIOS_PLATFORM=OS64")
CMAKE_ARGS+=("-DIOS_ARCH=armv7s")
CMAKE_ARGS+=("-DENABLE_BITCODE=OFF")
CMAKE_ARGS+=("-DENABLE_ARC=OFF")
# User-specified CMake arguments go last to allow overriding defaults
CMAKE_ARGS+=("$@")
cd build/ios/armv7s && cmake ../../.. \
"${CMAKE_ARGS[@]}"
# Cross-platform parallel build
if [ "$(uname)" == "Darwin" ]
then
cmake --build . -- "-j$(sysctl -n hw.ncpu)"
else
cmake --build . -- "-j$(nproc)"
fi | unknown | github | https://github.com/pytorch/pytorch | aten/src/ATen/native/quantized/cpu/qnnpack/scripts/build-ios-armv7s.sh |
/*
* Copyright (c) 2010-2011 Ævar Arnfjörð Bjarmason
*
* This is a skeleton no-op implementation of gettext for Git.
* You can replace it with something that uses libintl.h and wraps
* gettext() to try out the translations.
*/
#ifndef GETTEXT_H
#define GETTEXT_H
#if defined(_) || defined(Q_)
#error "namespace conflict: '_' or 'Q_' is pre-defined?"
#endif
#ifndef NO_GETTEXT
# include <libintl.h>
#else
# ifdef gettext
# undef gettext
# endif
# define gettext(s) (s)
# ifdef ngettext
# undef ngettext
# endif
# define ngettext(s, p, n) ((n == 1) ? (s) : (p))
#endif
#define FORMAT_PRESERVING(n) __attribute__((format_arg(n)))
#ifndef NO_GETTEXT
extern int git_gettext_enabled;
void git_setup_gettext(void);
int gettext_width(const char *s);
#else
#define git_gettext_enabled (0)
static inline void git_setup_gettext(void)
{
}
static inline int gettext_width(const char *s)
{
return strlen(s);
}
#endif
static inline FORMAT_PRESERVING(1) const char *_(const char *msgid)
{
if (!*msgid)
return "";
if (!git_gettext_enabled)
return msgid;
return gettext(msgid);
}
static inline FORMAT_PRESERVING(1) FORMAT_PRESERVING(2)
const char *Q_(const char *msgid, const char *plu, unsigned long n)
{
if (!git_gettext_enabled)
return n == 1 ? msgid : plu;
return ngettext(msgid, plu, n);
}
/* Mark msgid for translation but do not translate it. */
#define N_(msgid) msgid
const char *get_preferred_languages(void);
int is_utf8_locale(void);
#endif | c | github | https://github.com/git/git | gettext.h |
/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_CORE_PLATFORM_NULL_FILE_SYSTEM_H_
#define TENSORFLOW_CORE_PLATFORM_NULL_FILE_SYSTEM_H_
#include "tsl/platform/null_file_system.h"
namespace tensorflow {
#ifndef SWIG
using ::tsl::NullFileSystem; // NOLINT(misc-unused-using-decls)
#endif
// END_SKIP_DOXYGEN
} // namespace tensorflow
#endif // TENSORFLOW_CORE_PLATFORM_NULL_FILE_SYSTEM_H_ | c | github | https://github.com/tensorflow/tensorflow | tensorflow/core/platform/null_file_system.h |
"""
Helper methods related to EdxNotes.
"""
import json
import logging
import requests
from requests.exceptions import RequestException
from uuid import uuid4
from json import JSONEncoder
from datetime import datetime
from courseware.access import has_access
from courseware.views import get_current_child
from django.conf import settings
from django.core.urlresolvers import reverse
from django.core.exceptions import ImproperlyConfigured
from django.utils.translation import ugettext as _
from capa.util import sanitize_html
from student.models import anonymous_id_for_user
from xmodule.modulestore.django import modulestore
from xmodule.modulestore.exceptions import ItemNotFoundError
from util.date_utils import get_default_time_display
from dateutil.parser import parse as dateutil_parse
from provider.oauth2.models import AccessToken, Client
import oauth2_provider.oidc as oidc
from provider.utils import now
from opaque_keys.edx.keys import UsageKey
from .exceptions import EdxNotesParseError, EdxNotesServiceUnavailable
log = logging.getLogger(__name__)
HIGHLIGHT_TAG = "span"
HIGHLIGHT_CLASS = "note-highlight"
class NoteJSONEncoder(JSONEncoder):
"""
Custom JSON encoder that encodes datetime objects to appropriate time strings.
"""
# pylint: disable=method-hidden
def default(self, obj):
if isinstance(obj, datetime):
return get_default_time_display(obj)
return json.JSONEncoder.default(self, obj)
def get_id_token(user):
"""
Generates JWT ID-Token, using or creating user's OAuth access token.
"""
try:
client = Client.objects.get(name="edx-notes")
except Client.DoesNotExist:
raise ImproperlyConfigured("OAuth2 Client with name 'edx-notes' is not present in the DB")
try:
access_token = AccessToken.objects.get(
client=client,
user=user,
expires__gt=now()
)
except AccessToken.DoesNotExist:
access_token = AccessToken(client=client, user=user)
access_token.save()
id_token = oidc.id_token(access_token)
secret = id_token.access_token.client.client_secret
return id_token.encode(secret)
def get_token_url(course_id):
"""
Returns token url for the course.
"""
return reverse("get_token", kwargs={
"course_id": unicode(course_id),
})
def send_request(user, course_id, path="", query_string=None):
"""
Sends a request with appropriate parameters and headers.
"""
url = get_endpoint(path)
params = {
"user": anonymous_id_for_user(user, None),
"course_id": unicode(course_id).encode("utf-8"),
}
if query_string:
params.update({
"text": query_string,
"highlight": True,
"highlight_tag": HIGHLIGHT_TAG,
"highlight_class": HIGHLIGHT_CLASS,
})
try:
response = requests.get(
url,
headers={
"x-annotator-auth-token": get_id_token(user)
},
params=params
)
except RequestException:
raise EdxNotesServiceUnavailable(_("EdxNotes Service is unavailable. Please try again in a few minutes."))
return response
def get_parent_unit(xblock):
"""
Find vertical that is a unit, not just some container.
"""
while xblock:
xblock = xblock.get_parent()
if xblock is None:
return None
parent = xblock.get_parent()
if parent is None:
return None
if parent.category == 'sequential':
return xblock
def preprocess_collection(user, course, collection):
"""
Prepare `collection(notes_list)` provided by edx-notes-api
for rendering in a template:
add information about ancestor blocks,
convert "updated" to date
Raises:
ItemNotFoundError - when appropriate module is not found.
"""
# pylint: disable=too-many-statements
store = modulestore()
filtered_collection = list()
cache = {}
with store.bulk_operations(course.id):
for model in collection:
model.update({
u"text": sanitize_html(model["text"]),
u"quote": sanitize_html(model["quote"]),
u"updated": dateutil_parse(model["updated"]),
})
usage_id = model["usage_id"]
if usage_id in cache:
model.update(cache[usage_id])
filtered_collection.append(model)
continue
usage_key = UsageKey.from_string(usage_id)
# Add a course run if necessary.
usage_key = usage_key.replace(course_key=store.fill_in_run(usage_key.course_key))
try:
item = store.get_item(usage_key)
except ItemNotFoundError:
log.debug("Module not found: %s", usage_key)
continue
if not has_access(user, "load", item, course_key=course.id):
log.debug("User %s does not have an access to %s", user, item)
continue
unit = get_parent_unit(item)
if unit is None:
log.debug("Unit not found: %s", usage_key)
continue
section = unit.get_parent()
if not section:
log.debug("Section not found: %s", usage_key)
continue
if section in cache:
usage_context = cache[section]
usage_context.update({
"unit": get_module_context(course, unit),
})
model.update(usage_context)
cache[usage_id] = cache[unit] = usage_context
filtered_collection.append(model)
continue
chapter = section.get_parent()
if not chapter:
log.debug("Chapter not found: %s", usage_key)
continue
if chapter in cache:
usage_context = cache[chapter]
usage_context.update({
"unit": get_module_context(course, unit),
"section": get_module_context(course, section),
})
model.update(usage_context)
cache[usage_id] = cache[unit] = cache[section] = usage_context
filtered_collection.append(model)
continue
usage_context = {
"unit": get_module_context(course, unit),
"section": get_module_context(course, section),
"chapter": get_module_context(course, chapter),
}
model.update(usage_context)
cache[usage_id] = cache[unit] = cache[section] = cache[chapter] = usage_context
filtered_collection.append(model)
return filtered_collection
def get_module_context(course, item):
"""
Returns display_name and url for the parent module.
"""
item_dict = {
'location': unicode(item.location),
'display_name': item.display_name_with_default,
}
if item.category == 'chapter' and item.get_parent():
# course is a locator w/o branch and version
# so for uniformity we replace it with one that has them
course = item.get_parent()
item_dict['index'] = get_index(item_dict['location'], course.children)
elif item.category == 'vertical':
section = item.get_parent()
chapter = section.get_parent()
# Position starts from 1, that's why we add 1.
position = get_index(unicode(item.location), section.children) + 1
item_dict['url'] = reverse('courseware_position', kwargs={
'course_id': unicode(course.id),
'chapter': chapter.url_name,
'section': section.url_name,
'position': position,
})
if item.category in ('chapter', 'sequential'):
item_dict['children'] = [unicode(child) for child in item.children]
return item_dict
def get_index(usage_key, children):
"""
Returns an index of the child with `usage_key`.
"""
children = [unicode(child) for child in children]
return children.index(usage_key)
def search(user, course, query_string):
"""
Returns search results for the `query_string(str)`.
"""
response = send_request(user, course.id, "search", query_string)
try:
content = json.loads(response.content)
collection = content["rows"]
except (ValueError, KeyError):
log.warning("invalid JSON: %s", response.content)
raise EdxNotesParseError(_("Server error. Please try again in a few minutes."))
content.update({
"rows": preprocess_collection(user, course, collection)
})
return json.dumps(content, cls=NoteJSONEncoder)
def get_notes(user, course):
"""
Returns all notes for the user.
"""
response = send_request(user, course.id, "annotations")
try:
collection = json.loads(response.content)
except ValueError:
return None
if not collection:
return None
return json.dumps(preprocess_collection(user, course, collection), cls=NoteJSONEncoder)
def get_endpoint(path=""):
"""
Returns edx-notes-api endpoint.
"""
try:
url = settings.EDXNOTES_INTERFACE['url']
if not url.endswith("/"):
url += "/"
if path:
if path.startswith("/"):
path = path.lstrip("/")
if not path.endswith("/"):
path += "/"
return url + path
except (AttributeError, KeyError):
raise ImproperlyConfigured(_("No endpoint was provided for EdxNotes."))
def get_course_position(course_module):
"""
Return the user's current place in the course.
If this is the user's first time, leads to COURSE/CHAPTER/SECTION.
If this isn't the user's first time, leads to COURSE/CHAPTER.
If there is no current position in the course or chapter, then selects
the first child.
"""
urlargs = {'course_id': unicode(course_module.id)}
chapter = get_current_child(course_module, min_depth=1)
if chapter is None:
log.debug("No chapter found when loading current position in course")
return None
urlargs['chapter'] = chapter.url_name
if course_module.position is not None:
return {
'display_name': chapter.display_name_with_default,
'url': reverse('courseware_chapter', kwargs=urlargs),
}
# Relying on default of returning first child
section = get_current_child(chapter, min_depth=1)
if section is None:
log.debug("No section found when loading current position in course")
return None
urlargs['section'] = section.url_name
return {
'display_name': section.display_name_with_default,
'url': reverse('courseware_section', kwargs=urlargs)
}
def generate_uid():
"""
Generates unique id.
"""
return uuid4().int # pylint: disable=no-member
def is_feature_enabled(course):
"""
Returns True if Student Notes feature is enabled for the course,
False otherwise.
In order for the application to be enabled it must be:
1) enabled globally via FEATURES.
2) present in the course tab configuration.
3) Harvard Annotation Tool must be disabled for the course.
"""
return (settings.FEATURES.get("ENABLE_EDXNOTES")
and [t for t in course.tabs if t["type"] == "edxnotes"] # tab found
and not is_harvard_notes_enabled(course))
def is_harvard_notes_enabled(course):
"""
Returns True if Harvard Annotation Tool is enabled for the course,
False otherwise.
Checks for 'textannotation', 'imageannotation', 'videoannotation' in the list
of advanced modules of the course.
"""
modules = set(['textannotation', 'imageannotation', 'videoannotation'])
return bool(modules.intersection(course.advanced_modules)) | unknown | codeparrot/codeparrot-clean | ||
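A standalone sketch of the URL normalization performed by get_endpoint above; the real function reads the base URL from settings.EDXNOTES_INTERFACE, and the base URL used here is hypothetical:
def _join_endpoint(url, path=""):
    # Mirror of get_endpoint's slash handling, without the Django settings lookup.
    if not url.endswith("/"):
        url += "/"
    if path:
        path = path.lstrip("/")
        if not path.endswith("/"):
            path += "/"
    return url + path

expected = "http://notes.example.com/api/v1/annotations/"
assert _join_endpoint("http://notes.example.com/api/v1", "annotations") == expected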
use std::borrow::Cow;
use bstr::{ByteSlice, ByteVec};
/// The final component of the path, if it is a normal file.
///
/// If the path terminates in `..`, or consists solely of a root or prefix,
/// file_name will return `None`.
pub(crate) fn file_name<'a>(path: &Cow<'a, [u8]>) -> Option<Cow<'a, [u8]>> {
if path.is_empty() {
return None;
}
let last_slash = path.rfind_byte(b'/').map(|i| i + 1).unwrap_or(0);
let got = match *path {
Cow::Borrowed(path) => Cow::Borrowed(&path[last_slash..]),
Cow::Owned(ref path) => {
let mut path = path.clone();
path.drain_bytes(..last_slash);
Cow::Owned(path)
}
};
if got == &b".."[..] {
return None;
}
Some(got)
}
/// Return a file extension given a path's file name.
///
/// Note that this does NOT match the semantics of std::path::Path::extension.
/// Namely, the extension includes the `.` and matching is otherwise more
/// liberal. Specifically, the extension is:
///
/// * None, if the file name given is empty;
/// * None, if there is no embedded `.`;
/// * Otherwise, the portion of the file name starting with the final `.`.
///
/// e.g., A file name of `.rs` has an extension `.rs`.
///
/// N.B. This is done to make certain glob match optimizations easier. Namely,
/// a pattern like `*.rs` is obviously trying to match files with a `rs`
/// extension, but it also matches files like `.rs`, which doesn't have an
/// extension according to std::path::Path::extension.
pub(crate) fn file_name_ext<'a>(
name: &Cow<'a, [u8]>,
) -> Option<Cow<'a, [u8]>> {
if name.is_empty() {
return None;
}
let last_dot_at = match name.rfind_byte(b'.') {
None => return None,
Some(i) => i,
};
Some(match *name {
Cow::Borrowed(name) => Cow::Borrowed(&name[last_dot_at..]),
Cow::Owned(ref name) => {
let mut name = name.clone();
name.drain_bytes(..last_dot_at);
Cow::Owned(name)
}
})
}
/// Normalizes a path to use `/` as a separator everywhere, even on platforms
/// that recognize other characters as separators.
#[cfg(unix)]
pub(crate) fn normalize_path(path: Cow<'_, [u8]>) -> Cow<'_, [u8]> {
// UNIX only uses /, so we're good.
path
}
/// Normalizes a path to use `/` as a separator everywhere, even on platforms
/// that recognize other characters as separators.
#[cfg(not(unix))]
pub(crate) fn normalize_path(mut path: Cow<[u8]>) -> Cow<[u8]> {
use std::path::is_separator;
for i in 0..path.len() {
if path[i] == b'/' || !is_separator(char::from(path[i])) {
continue;
}
path.to_mut()[i] = b'/';
}
path
}
#[cfg(test)]
mod tests {
use std::borrow::Cow;
use bstr::{B, ByteVec};
use super::{file_name_ext, normalize_path};
macro_rules! ext {
($name:ident, $file_name:expr, $ext:expr) => {
#[test]
fn $name() {
let bs = Vec::from($file_name);
let got = file_name_ext(&Cow::Owned(bs));
assert_eq!($ext.map(|s| Cow::Borrowed(B(s))), got);
}
};
}
ext!(ext1, "foo.rs", Some(".rs"));
ext!(ext2, ".rs", Some(".rs"));
ext!(ext3, "..rs", Some(".rs"));
ext!(ext4, "", None::<&str>);
ext!(ext5, "foo", None::<&str>);
macro_rules! normalize {
($name:ident, $path:expr, $expected:expr) => {
#[test]
fn $name() {
let bs = Vec::from_slice($path);
let got = normalize_path(Cow::Owned(bs));
assert_eq!($expected.to_vec(), got.into_owned());
}
};
}
normalize!(normal1, b"foo", b"foo");
normalize!(normal2, b"foo/bar", b"foo/bar");
#[cfg(unix)]
normalize!(normal3, b"foo\\bar", b"foo\\bar");
#[cfg(not(unix))]
normalize!(normal3, b"foo\\bar", b"foo/bar");
#[cfg(unix)]
normalize!(normal4, b"foo\\bar/baz", b"foo\\bar/baz");
#[cfg(not(unix))]
normalize!(normal4, b"foo\\bar/baz", b"foo/bar/baz");
} | rust | github | https://github.com/BurntSushi/ripgrep | crates/globset/src/pathutil.rs |
{
"BGREWRITEAOF": {
"summary": "Asynchronously rewrites the append-only file to disk.",
"complexity": "O(1)",
"group": "server",
"since": "1.0.0",
"arity": 1,
"function": "bgrewriteaofCommand",
"command_flags": [
"NO_ASYNC_LOADING",
"ADMIN",
"NOSCRIPT"
],
"reply_schema": {
"description": "A simple string reply indicating that the rewriting started or is about to start ASAP",
"type": "string"
}
}
} | json | github | https://github.com/redis/redis | src/commands/bgrewriteaof.json |
A private item was used outside of its scope.
Erroneous code example:
```compile_fail,E0624
mod inner {
pub struct Foo;
impl Foo {
fn method(&self) {}
}
}
let foo = inner::Foo;
foo.method(); // error: method `method` is private
```
Two possibilities are available to solve this issue:
1. Only use the item in the scope it has been defined:
```
mod inner {
pub struct Foo;
impl Foo {
fn method(&self) {}
}
pub fn call_method(foo: &Foo) { // We create a public function.
foo.method(); // Which calls the item.
}
}
let foo = inner::Foo;
inner::call_method(&foo); // And since the function is public, we can call the
// method through it.
```
2. Make the item public:
```
mod inner {
pub struct Foo;
impl Foo {
pub fn method(&self) {} // It's now public.
}
}
let foo = inner::Foo;
foo.method(); // Ok!
``` | unknown | github | https://github.com/rust-lang/rust | compiler/rustc_error_codes/src/error_codes/E0624.md |
# Copyright 2015 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=access-member-before-definition,attribute-defined-outside-init,unused-argument
import os
from wlauto import Instrument, Parameter, Executable
from wlauto.exceptions import ConfigError, InstrumentError
from wlauto.utils.types import list_or_string
class FilePoller(Instrument):
name = 'file_poller'
description = """
Polls the given files at a set sample interval. The values are output in CSV format.
This instrument places a file called poller.csv in each iteration's result directory.
This file will contain a timestamp column which will be in uS, the rest of the columns
will be the contents of the polled files at that time.
This instrument will strip any commas or newlines from the files' values
before writing them.
"""
parameters = [
Parameter('sample_interval', kind=int, default=1000,
description="""The interval between samples in mS."""),
Parameter('files', kind=list_or_string,
description="""A list of paths to the files to be polled"""),
Parameter('labels', kind=list_or_string,
description="""A list of lables to be used in the CSV output for
the corresponding files. This cannot be used if
a `*` wildcard is used in a path."""),
Parameter('as_root', kind=bool, default=False,
description="""
Whether or not the poller will be run as root. This should be
used when the file you need to poll can only be accessed by root.
"""),
]
def validate(self):
if self.labels and any(['*' in f for f in self.files]):
raise ConfigError('You cannot use manual labels with `*` wildcards')
def initialize(self, context):
if not self.device.is_rooted and self.as_root:
raise ConfigError('The device is not rooted, cannot run poller as root.')
host_poller = context.resolver.get(Executable(self, self.device.abi,
"poller"))
target_poller = self.device.install(host_poller)
expanded_paths = []
for path in self.files:
if "*" in path:
for p in self.device.listdir(path):
expanded_paths.append(p)
else:
expanded_paths.append(path)
self.files = expanded_paths
if not self.labels:
self.labels = self._generate_labels()
self.target_output_path = self.device.path.join(self.device.working_directory, 'poller.csv')
self.target_log_path = self.device.path.join(self.device.working_directory, 'poller.log')
self.command = '{} -t {} -l {} {} > {} 2>{}'.format(target_poller,
self.sample_interval * 1000,
','.join(self.labels),
' '.join(self.files),
self.target_output_path,
self.target_log_path)
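# For illustration only (paths and labels hypothetical): with the default
# sample_interval of 1000 mS, labels ['a', 'b'] and files ['/sys/a', '/sys/b'],
# the command built above expands roughly to:
#   <poller> -t 1000000 -l a,b /sys/a /sys/b > <wd>/poller.csv 2><wd>/poller.log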
def start(self, context):
self.device.kick_off(self.command, as_root=self.as_root)
def stop(self, context):
self.device.killall('poller', signal='TERM', as_root=self.as_root)
def update_result(self, context):
host_output_file = os.path.join(context.output_directory, 'poller.csv')
self.device.pull_file(self.target_output_path, host_output_file)
context.add_artifact('poller_output', host_output_file, kind='data')
host_log_file = os.path.join(context.output_directory, 'poller.log')
self.device.pull_file(self.target_log_path, host_log_file)
context.add_artifact('poller_log', host_log_file, kind='log')
with open(host_log_file) as fh:
for line in fh:
if 'ERROR' in line:
raise InstrumentError(line.strip())
if 'WARNING' in line:
self.logger.warning(line.strip())
def teardown(self, context):
self.device.delete_file(self.target_output_path)
self.device.delete_file(self.target_log_path)
def _generate_labels(self):
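# Worked example with hypothetical paths: for files
# ['/sys/devices/a/temp', '/sys/devices/b/temp'] only the third path
# component differs, so the generated labels are 'a-temp' and 'b-temp'.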
# Split paths into their parts
path_parts = [f.split(self.device.path.sep) for f in self.files]
# Identify which parts differ between at least two of the paths
differ_map = [len(set(x)) > 1 for x in zip(*path_parts)]
# compose labels from path parts that differ
labels = []
for pp in path_parts:
label_parts = [p for i, p in enumerate(pp[:-1])
if i >= len(differ_map) or differ_map[i]]
label_parts.append(pp[-1]) # always use file name even if same for all
labels.append('-'.join(label_parts))
return labels | unknown | codeparrot/codeparrot-clean | ||
# -*- coding: utf-8 -*-
"""
***************************************************************************
TestData.py
---------------------
Date : March 2013
Copyright : (C) 2013 by Victor Olaya
Email : volayaf at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Victor Olaya'
__date__ = 'March 2013'
__copyright__ = '(C) 2013, Victor Olaya'
import os.path
testDataPath = os.path.join(os.path.dirname(__file__), 'testdata')
def table():
return os.path.join(testDataPath, 'table.dbf')
def points():
return os.path.join(testDataPath, 'points.gml')
def invalid_geometries():
return os.path.join(testDataPath, 'invalidgeometries.gml') | unknown | codeparrot/codeparrot-clean | ||
/* contrib/pg_freespacemap/pg_freespacemap--1.2--1.3.sql */
-- complain if script is sourced in psql, rather than via ALTER EXTENSION
\echo Use "ALTER EXTENSION pg_freespacemap UPDATE TO '1.3'" to load this file. \quit
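-- Usage sketch (relation name is illustrative, not part of this script):
--   SELECT * FROM pg_freespace('some_table'::regclass) LIMIT 5;
-- lists the free-space entries for the first five blocks of the relation.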
CREATE OR REPLACE FUNCTION
pg_freespace(rel regclass, blkno OUT bigint, avail OUT int2)
RETURNS SETOF RECORD
LANGUAGE SQL PARALLEL SAFE
BEGIN ATOMIC
SELECT blkno, pg_freespace($1, blkno) AS avail
FROM generate_series('0'::bigint, pg_relation_size($1) / current_setting('block_size'::text)::bigint - '1'::bigint) AS blkno;
END; | sql | github | https://github.com/postgres/postgres | contrib/pg_freespacemap/pg_freespacemap--1.2--1.3.sql |
use std::io::{self, Write};
use gsgdt::GraphvizSettings;
use rustc_graphviz as dot;
use super::generic_graph::mir_fn_to_generic_graph;
use super::pretty::dump_mir_def_ids;
use crate::mir::*;
/// Write a graphviz DOT graph of a list of MIRs.
pub fn write_mir_graphviz<W>(tcx: TyCtxt<'_>, single: Option<DefId>, w: &mut W) -> io::Result<()>
where
W: Write,
{
let def_ids = dump_mir_def_ids(tcx, single);
let mirs = def_ids
.iter()
.filter(|def_id| !tcx.is_trivial_const(*def_id))
.flat_map(|def_id| {
if tcx.is_const_fn(*def_id) {
vec![tcx.optimized_mir(*def_id), tcx.mir_for_ctfe(*def_id)]
} else {
vec![tcx.instance_mir(ty::InstanceKind::Item(*def_id))]
}
})
.collect::<Vec<_>>();
let use_subgraphs = mirs.len() > 1;
if use_subgraphs {
writeln!(w, "digraph __crate__ {{")?;
}
for mir in mirs {
write_mir_fn_graphviz(tcx, mir, use_subgraphs, w)?;
}
if use_subgraphs {
writeln!(w, "}}")?;
}
Ok(())
}
/// Write a graphviz DOT graph of the MIR.
pub fn write_mir_fn_graphviz<'tcx, W>(
tcx: TyCtxt<'tcx>,
body: &Body<'_>,
subgraph: bool,
w: &mut W,
) -> io::Result<()>
where
W: Write,
{
// Global graph properties
let font = format!(r#"fontname="{}""#, tcx.sess.opts.unstable_opts.graphviz_font);
let mut graph_attrs = vec![&font[..]];
let mut content_attrs = vec![&font[..]];
let dark_mode = tcx.sess.opts.unstable_opts.graphviz_dark_mode;
if dark_mode {
graph_attrs.push(r#"bgcolor="black""#);
graph_attrs.push(r#"fontcolor="white""#);
content_attrs.push(r#"color="white""#);
content_attrs.push(r#"fontcolor="white""#);
}
// Graph label
let mut label = String::from("");
// FIXME: remove this unwrap
write_graph_label(tcx, body, &mut label).unwrap();
let g = mir_fn_to_generic_graph(tcx, body);
let settings = GraphvizSettings {
graph_attrs: Some(graph_attrs.join(" ")),
node_attrs: Some(content_attrs.join(" ")),
edge_attrs: Some(content_attrs.join(" ")),
graph_label: Some(label),
};
g.to_dot(w, &settings, subgraph)
}
/// Write the graphviz DOT label for the overall graph. This is essentially a block of text that
/// will appear below the graph, showing the type of the `fn` this MIR represents and the types of
/// all the variables and temporaries.
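///
/// For illustration only (not verbatim compiler output): a function like
/// `fn foo(x: i32) -> bool` with a single mutable temporary would be rendered
/// roughly as `fn foo(_1: i32) -> bool<br align="left"/>` followed by
/// `let mut _2: i32;<br align="left"/>`.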
fn write_graph_label<'tcx, W: std::fmt::Write>(
tcx: TyCtxt<'tcx>,
body: &Body<'_>,
w: &mut W,
) -> std::fmt::Result {
let def_id = body.source.def_id();
write!(w, "fn {}(", dot::escape_html(&tcx.def_path_str(def_id)))?;
// fn argument types.
for (i, arg) in body.args_iter().enumerate() {
if i > 0 {
write!(w, ", ")?;
}
write!(w, "{:?}: {}", Place::from(arg), escape(&body.local_decls[arg].ty))?;
}
write!(w, ") -> {}", escape(&body.return_ty()))?;
write!(w, r#"<br align="left"/>"#)?;
for local in body.vars_and_temps_iter() {
let decl = &body.local_decls[local];
write!(w, "let ")?;
if decl.mutability.is_mut() {
write!(w, "mut ")?;
}
write!(w, r#"{:?}: {};<br align="left"/>"#, Place::from(local), escape(&decl.ty))?;
}
for var_debug_info in &body.var_debug_info {
write!(
w,
r#"debug {} => {};<br align="left"/>"#,
var_debug_info.name,
escape(&var_debug_info.value),
)?;
}
Ok(())
}
fn escape<T: Debug>(t: &T) -> String {
dot::escape_html(&format!("{t:?}"))
} | rust | github | https://github.com/rust-lang/rust | compiler/rustc_middle/src/mir/graphviz.rs |
/*
* Copyright 2002-present the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.cache.annotation;
import java.util.Collections;
import java.util.List;
import org.junit.jupiter.api.Test;
import org.springframework.aop.Advisor;
import org.springframework.aop.framework.Advised;
import org.springframework.aop.support.AopUtils;
import org.springframework.cache.CacheManager;
import org.springframework.cache.interceptor.BeanFactoryCacheOperationSourceAdvisor;
import org.springframework.cache.support.NoOpCacheManager;
import org.springframework.context.annotation.AdviceMode;
import org.springframework.context.annotation.AnnotationConfigApplicationContext;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.stereotype.Repository;
import static org.assertj.core.api.Assertions.assertThat;
import static org.assertj.core.api.Assertions.assertThatException;
/**
* Integration tests for the @EnableCaching annotation.
*
* @author Chris Beams
* @since 3.1
*/
class EnableCachingIntegrationTests {
@Test
void repositoryIsClassBasedCacheProxy() {
AnnotationConfigApplicationContext ctx = new AnnotationConfigApplicationContext();
ctx.register(Config.class, ProxyTargetClassCachingConfig.class);
ctx.refresh();
assertCacheProxying(ctx);
assertThat(AopUtils.isCglibProxy(ctx.getBean(FooRepository.class))).isTrue();
}
@Test
void repositoryUsesAspectJAdviceMode() {
AnnotationConfigApplicationContext ctx = new AnnotationConfigApplicationContext();
ctx.register(Config.class, AspectJCacheConfig.class);
// this test is a bit fragile, but gets the job done, proving that an
// attempt was made to look up the AJ aspect. It's due to classpath issues
// in integration-tests that it's not found.
assertThatException().isThrownBy(ctx::refresh)
.withMessageContaining("AspectJCachingConfiguration");
}
private void assertCacheProxying(AnnotationConfigApplicationContext ctx) {
FooRepository repo = ctx.getBean(FooRepository.class);
assertThat(isCacheProxy(repo)).isTrue();
}
private boolean isCacheProxy(FooRepository repo) {
if (AopUtils.isAopProxy(repo)) {
for (Advisor advisor : ((Advised)repo).getAdvisors()) {
if (advisor instanceof BeanFactoryCacheOperationSourceAdvisor) {
return true;
}
}
}
return false;
}
@Configuration
@EnableCaching(proxyTargetClass=true)
static class ProxyTargetClassCachingConfig {
@Bean
CacheManager mgr() {
return new NoOpCacheManager();
}
}
@Configuration
static class Config {
@Bean
FooRepository fooRepository() {
return new DummyFooRepository();
}
}
@Configuration
@EnableCaching(mode=AdviceMode.ASPECTJ)
static class AspectJCacheConfig {
@Bean
CacheManager cacheManager() {
return new NoOpCacheManager();
}
}
interface FooRepository {
List<Object> findAll();
}
@Repository
static class DummyFooRepository implements FooRepository {
@Override
@Cacheable("primary")
public List<Object> findAll() {
return Collections.emptyList();
}
}
} | java | github | https://github.com/spring-projects/spring-framework | integration-tests/src/test/java/org/springframework/cache/annotation/EnableCachingIntegrationTests.java |
# -*- encoding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
# Copyright (C) 2009 Renato Lima - Akretion
{
'name': 'Brazilian - Accounting',
'category': 'Localization/Account Charts',
'description': """
Base module for the Brazilian localization
==========================================
This module consists of:
- Generic Brazilian chart of accounts
- Brazilian taxes such as:
- IPI
- ICMS
- PIS
- COFINS
- ISS
- IR
- IRPJ
- CSLL
The field tax_discount has also been added in the account.tax.template and
account.tax objects to allow the proper computation of some Brazilian VATs
such as ICMS. The chart of account creation wizard has been extended to
propagate those new data properly.
It's important to note, however, that this module lacks many implementations needed to
use OpenERP properly in Brazil. Those implementations (such as the electronic
fiscal invoicing, which is already operational) are brought by more than 15
additional modules of the Brazilian Launchpad localization project
https://launchpad.net/openerp.pt-br-localiz and their dependencies in the
extra addons branch. Those modules aim not to break with the remarkable
OpenERP modularity; this is why they are numerous but small. One of the
reasons for maintaining those modules apart is that Brazilian Localization
leaders need commit rights agility to complete the localization as companies
fund the remaining legal requirements (such as fiscal ledgers,
accounting SPED, fiscal SPED and PAF ECF, which were still missing as of September
2011). Those modules are also strictly licensed under AGPL V3 and today don't
come with any additional paid permission for online use of 'private modules'.
""",
'author': 'Akretion, OpenERP Brasil',
'website': 'http://openerpbrasil.org',
'version': '1.0',
'depends': ['account'],
'data': [
'data/account_chart_template.xml',
'data/account.account.template.csv',
'data/account_tax_template.xml',
'account_view.xml',
'account_chart_template.yml',
],
'installable': True,
} | unknown | codeparrot/codeparrot-clean | ||
<?php
$container->loadFromExtension('security', [
'access_decision_manager' => [
'allow_if_all_abstain' => true,
'allow_if_equal_granted_denied' => false,
],
'providers' => [
'default' => [
'memory' => [
'users' => [
'foo' => ['password' => 'foo', 'roles' => 'ROLE_USER'],
],
],
],
],
'firewalls' => [
'simple' => ['pattern' => '/login', 'security' => false],
],
]); | php | github | https://github.com/symfony/symfony | src/Symfony/Bundle/SecurityBundle/Tests/DependencyInjection/Fixtures/php/access_decision_manager_customized_config.php |
##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/llnl/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
import spack.architecture
import pytest
from spack.spec import *
from spack.variant import *
def target_factory(spec_string, target_concrete):
spec = Spec(spec_string)
if target_concrete:
spec._mark_concrete()
substitute_abstract_variants(spec)
return spec
def argument_factory(argument_spec, left):
try:
# If it's not anonymous, allow it
right = target_factory(argument_spec, False)
except Exception:
right = parse_anonymous_spec(argument_spec, left.name)
return right
def check_satisfies(target_spec, argument_spec, target_concrete=False):
left = target_factory(target_spec, target_concrete)
right = argument_factory(argument_spec, left)
# Satisfies is one-directional.
assert left.satisfies(right)
assert left.satisfies(argument_spec)
# If left satisfies right, then we should be able to constrain
# right by left. Reverse is not always true.
right.copy().constrain(left)
def check_unsatisfiable(target_spec, argument_spec, target_concrete=False):
left = target_factory(target_spec, target_concrete)
right = argument_factory(argument_spec, left)
assert not left.satisfies(right)
assert not left.satisfies(argument_spec)
with pytest.raises(UnsatisfiableSpecError):
right.copy().constrain(left)
def check_constrain(expected, spec, constraint):
exp = Spec(expected)
spec = Spec(spec)
constraint = Spec(constraint)
spec.constrain(constraint)
assert exp == spec
def check_constrain_changed(spec, constraint):
spec = Spec(spec)
assert spec.constrain(constraint)
def check_constrain_not_changed(spec, constraint):
spec = Spec(spec)
assert not spec.constrain(constraint)
def check_invalid_constraint(spec, constraint):
spec = Spec(spec)
constraint = Spec(constraint)
with pytest.raises(UnsatisfiableSpecError):
spec.constrain(constraint)
@pytest.mark.usefixtures('config', 'builtin_mock')
class TestSpecSematics(object):
"""This tests satisfies(), constrain() and other semantic operations
on specs.
"""
def test_satisfies(self):
check_satisfies('libelf@0.8.13', '@0:1')
check_satisfies('libdwarf^libelf@0.8.13', '^libelf@0:1')
def test_satisfies_namespace(self):
check_satisfies('builtin.mpich', 'mpich')
check_satisfies('builtin.mock.mpich', 'mpich')
# TODO: only works for deps now, but shouldn't we allow for root spec?
# check_satisfies('builtin.mock.mpich', 'mpi')
check_satisfies('builtin.mock.mpich', 'builtin.mock.mpich')
check_unsatisfiable('builtin.mock.mpich', 'builtin.mpich')
def test_satisfies_namespaced_dep(self):
"""Ensure spec from same or unspecified namespace satisfies namespace
constraint."""
check_satisfies('mpileaks ^builtin.mock.mpich', '^mpich')
check_satisfies('mpileaks ^builtin.mock.mpich', '^mpi')
check_satisfies(
'mpileaks ^builtin.mock.mpich', '^builtin.mock.mpich')
check_unsatisfiable(
'mpileaks ^builtin.mock.mpich', '^builtin.mpich')
def test_satisfies_compiler(self):
check_satisfies('foo%gcc', '%gcc')
check_satisfies('foo%intel', '%intel')
check_unsatisfiable('foo%intel', '%gcc')
check_unsatisfiable('foo%intel', '%pgi')
def test_satisfies_compiler_version(self):
check_satisfies('foo%gcc', '%gcc@4.7.2')
check_satisfies('foo%intel', '%intel@4.7.2')
check_satisfies('foo%pgi@4.5', '%pgi@4.4:4.6')
check_satisfies('foo@2.0%pgi@4.5', '@1:3%pgi@4.4:4.6')
check_unsatisfiable('foo%pgi@4.3', '%pgi@4.4:4.6')
check_unsatisfiable('foo@4.0%pgi', '@1:3%pgi')
check_unsatisfiable('foo@4.0%pgi@4.5', '@1:3%pgi@4.4:4.6')
check_satisfies('foo %gcc@4.7.3', '%gcc@4.7')
check_unsatisfiable('foo %gcc@4.7', '%gcc@4.7.3')
def test_satisfies_architecture(self):
check_satisfies(
'foo platform=test',
'platform=test')
check_satisfies(
'foo platform=linux',
'platform=linux')
check_satisfies(
'foo platform=test',
'platform=test target=frontend')
check_satisfies(
'foo platform=test',
'platform=test os=frontend target=frontend')
check_satisfies(
'foo platform=test os=frontend target=frontend',
'platform=test')
check_unsatisfiable(
'foo platform=linux',
'platform=test os=redhat6 target=x86_32')
check_unsatisfiable(
'foo os=redhat6',
'platform=test os=debian6 target=x86_64')
check_unsatisfiable(
'foo target=x86_64',
'platform=test os=redhat6 target=x86_32')
check_satisfies(
'foo arch=test-None-None',
'platform=test')
check_satisfies(
'foo arch=test-None-frontend',
'platform=test target=frontend')
check_satisfies(
'foo arch=test-frontend-frontend',
'platform=test os=frontend target=frontend')
check_satisfies(
'foo arch=test-frontend-frontend',
'platform=test')
check_unsatisfiable(
'foo arch=test-frontend-frontend',
'platform=test os=frontend target=backend')
check_satisfies(
'foo platform=test target=frontend os=frontend',
'platform=test target=frontend os=frontend')
check_satisfies(
'foo platform=test target=backend os=backend',
'platform=test target=backend os=backend')
check_satisfies(
'foo platform=test target=default_target os=default_os',
'platform=test os=default_os')
check_unsatisfiable(
'foo platform=test target=x86_32 os=redhat6',
'platform=linux target=x86_32 os=redhat6')
def test_satisfies_dependencies(self):
check_satisfies('mpileaks^mpich', '^mpich')
check_satisfies('mpileaks^zmpi', '^zmpi')
check_unsatisfiable('mpileaks^mpich', '^zmpi')
check_unsatisfiable('mpileaks^zmpi', '^mpich')
def test_satisfies_dependency_versions(self):
check_satisfies('mpileaks^mpich@2.0', '^mpich@1:3')
check_unsatisfiable('mpileaks^mpich@1.2', '^mpich@2.0')
check_satisfies(
'mpileaks^mpich@2.0^callpath@1.5', '^mpich@1:3^callpath@1.4:1.6')
check_unsatisfiable(
'mpileaks^mpich@4.0^callpath@1.5', '^mpich@1:3^callpath@1.4:1.6')
check_unsatisfiable(
'mpileaks^mpich@2.0^callpath@1.7', '^mpich@1:3^callpath@1.4:1.6')
check_unsatisfiable(
'mpileaks^mpich@4.0^callpath@1.7', '^mpich@1:3^callpath@1.4:1.6')
def test_satisfies_virtual_dependencies(self):
check_satisfies('mpileaks^mpi', '^mpi')
check_satisfies('mpileaks^mpi', '^mpich')
check_satisfies('mpileaks^mpi', '^zmpi')
check_unsatisfiable('mpileaks^mpich', '^zmpi')
def test_satisfies_virtual_dependency_versions(self):
check_satisfies('mpileaks^mpi@1.5', '^mpi@1.2:1.6')
check_unsatisfiable('mpileaks^mpi@3', '^mpi@1.2:1.6')
check_satisfies('mpileaks^mpi@2:', '^mpich')
check_satisfies('mpileaks^mpi@2:', '^mpich@3.0.4')
check_satisfies('mpileaks^mpi@2:', '^mpich2@1.4')
check_satisfies('mpileaks^mpi@1:', '^mpich2')
check_satisfies('mpileaks^mpi@2:', '^mpich2')
check_unsatisfiable('mpileaks^mpi@3:', '^mpich2@1.4')
check_unsatisfiable('mpileaks^mpi@3:', '^mpich2')
check_unsatisfiable('mpileaks^mpi@3:', '^mpich@1.0')
def test_satisfies_matching_variant(self):
check_satisfies('mpich+foo', 'mpich+foo')
check_satisfies('mpich~foo', 'mpich~foo')
check_satisfies('mpich foo=1', 'mpich foo=1')
# confirm that synonymous syntax works correctly
check_satisfies('mpich+foo', 'mpich foo=True')
check_satisfies('mpich foo=true', 'mpich+foo')
check_satisfies('mpich~foo', 'mpich foo=FALSE')
check_satisfies('mpich foo=False', 'mpich~foo')
def test_satisfies_multi_value_variant(self):
# Check quoting
check_satisfies('multivalue_variant foo="bar,baz"',
'multivalue_variant foo="bar,baz"')
check_satisfies('multivalue_variant foo=bar,baz',
'multivalue_variant foo=bar,baz')
check_satisfies('multivalue_variant foo="bar,baz"',
'multivalue_variant foo=bar,baz')
# A more constrained spec satisfies a less constrained one
check_satisfies('multivalue_variant foo="bar,baz"',
'multivalue_variant foo="bar"')
check_satisfies('multivalue_variant foo="bar,baz"',
'multivalue_variant foo="baz"')
check_satisfies('multivalue_variant foo="bar,baz,barbaz"',
'multivalue_variant foo="bar,baz"')
check_satisfies('multivalue_variant foo="bar,baz"',
'foo="bar,baz"')
check_satisfies('multivalue_variant foo="bar,baz"',
'foo="bar"')
def test_satisfies_single_valued_variant(self):
"""Tests that the case reported in
https://github.com/LLNL/spack/pull/2386#issuecomment-282147639
is handled correctly.
"""
a = Spec('a foobar=bar')
a.concretize()
assert a.satisfies('foobar=bar')
# Assert that an autospec generated from a literal
# gives the right result for a single valued variant
assert 'foobar=bar' in a
assert 'foobar=baz' not in a
assert 'foobar=fee' not in a
# ... and for a multi valued variant
assert 'foo=bar' in a
# Check that conditional dependencies are treated correctly
assert '^b' in a
def test_unsatisfied_single_valued_variant(self):
a = Spec('a foobar=baz')
a.concretize()
assert '^b' not in a
mv = Spec('multivalue_variant')
mv.concretize()
assert 'a@1.0' not in mv
def test_indirect_unsatisfied_single_valued_variant(self):
spec = Spec('singlevalue-variant-dependent')
spec.concretize()
assert 'a@1.0' not in spec
def test_unsatisfiable_multi_value_variant(self):
# Semantics for a multi-valued variant differ
# depending on whether the spec is concrete or not
a = target_factory(
'multivalue_variant foo="bar"', target_concrete=True
)
spec_str = 'multivalue_variant foo="bar,baz"'
b = Spec(spec_str)
assert not a.satisfies(b)
assert not a.satisfies(spec_str)
# A concrete spec cannot be constrained further
with pytest.raises(UnsatisfiableSpecError):
a.constrain(b)
a = Spec('multivalue_variant foo="bar"')
spec_str = 'multivalue_variant foo="bar,baz"'
b = Spec(spec_str)
# The specs are abstract and they **could** be constrained
assert a.satisfies(b)
assert a.satisfies(spec_str)
# An abstract spec can instead be constrained
assert a.constrain(b)
a = target_factory(
'multivalue_variant foo="bar,baz"', target_concrete=True
)
spec_str = 'multivalue_variant foo="bar,baz,quux"'
b = Spec(spec_str)
assert not a.satisfies(b)
assert not a.satisfies(spec_str)
# A concrete spec cannot be constrained further
with pytest.raises(UnsatisfiableSpecError):
a.constrain(b)
a = Spec('multivalue_variant foo="bar,baz"')
spec_str = 'multivalue_variant foo="bar,baz,quux"'
b = Spec(spec_str)
# The specs are abstract and they **could** be constrained
assert a.satisfies(b)
assert a.satisfies(spec_str)
# An abstract spec can instead be constrained
assert a.constrain(b)
# ...but will fail during concretization if there are
# values in the variant that are not allowed
with pytest.raises(InvalidVariantValueError):
a.concretize()
# This time we'll try to set a single-valued variant
a = Spec('multivalue_variant fee="bar"')
spec_str = 'multivalue_variant fee="baz"'
b = Spec(spec_str)
# The specs are abstract and they **could** be constrained,
# as before concretization I don't know which type of variant
# I have (if it is not a BV)
assert a.satisfies(b)
assert a.satisfies(spec_str)
# A variant cannot be parsed as single-valued until we try to
# concretize. This means that we can constrain the variant above
assert a.constrain(b)
# ...but will fail during concretization if there are
# multiple values set
with pytest.raises(MultipleValuesInExclusiveVariantError):
a.concretize()
def test_unsatisfiable_variant_types(self):
# These should fail due to incompatible types
# FIXME: these need to be checked as the new relaxed
# FIXME: semantics make them fail (constrain does not raise)
# check_unsatisfiable('multivalue_variant +foo',
# 'multivalue_variant foo="bar"')
# check_unsatisfiable('multivalue_variant ~foo',
# 'multivalue_variant foo="bar"')
check_unsatisfiable(
target_spec='multivalue_variant foo="bar"',
argument_spec='multivalue_variant +foo',
target_concrete=True
)
check_unsatisfiable(
target_spec='multivalue_variant foo="bar"',
argument_spec='multivalue_variant ~foo',
target_concrete=True
)
def test_satisfies_unconstrained_variant(self):
# only asked for mpich, no constraints. Either will do.
check_satisfies('mpich+foo', 'mpich')
check_satisfies('mpich~foo', 'mpich')
check_satisfies('mpich foo=1', 'mpich')
def test_unsatisfiable_variants(self):
# This case is different depending on whether the specs are concrete.
# 'mpich' is not concrete:
check_satisfies('mpich', 'mpich+foo', False)
check_satisfies('mpich', 'mpich~foo', False)
check_satisfies('mpich', 'mpich foo=1', False)
# 'mpich' is concrete:
check_unsatisfiable('mpich', 'mpich+foo', True)
check_unsatisfiable('mpich', 'mpich~foo', True)
check_unsatisfiable('mpich', 'mpich foo=1', True)
def test_unsatisfiable_variant_mismatch(self):
# No match in specs
check_unsatisfiable('mpich~foo', 'mpich+foo')
check_unsatisfiable('mpich+foo', 'mpich~foo')
check_unsatisfiable('mpich foo=True', 'mpich foo=False')
def test_satisfies_matching_compiler_flag(self):
check_satisfies('mpich cppflags="-O3"', 'mpich cppflags="-O3"')
check_satisfies(
'mpich cppflags="-O3 -Wall"', 'mpich cppflags="-O3 -Wall"'
)
def test_satisfies_unconstrained_compiler_flag(self):
# only asked for mpich, no constraints. Any will do.
check_satisfies('mpich cppflags="-O3"', 'mpich')
def test_unsatisfiable_compiler_flag(self):
# This case is different depending on whether the specs are concrete.
# 'mpich' is not concrete:
check_satisfies('mpich', 'mpich cppflags="-O3"', False)
# 'mpich' is concrete:
check_unsatisfiable('mpich', 'mpich cppflags="-O3"', True)
def test_copy_satisfies_transitive(self):
spec = Spec('dttop')
spec.concretize()
copy = spec.copy()
for s in spec.traverse():
assert s.satisfies(copy[s.name])
assert copy[s.name].satisfies(s)
def test_unsatisfiable_compiler_flag_mismatch(self):
# No match in specs
check_unsatisfiable(
'mpich cppflags="-O3"', 'mpich cppflags="-O2"')
def test_satisfies_virtual(self):
# Don't use check_satisfies: it checks constrain() too, and
# you can't constrain a non-virtual by a virtual.
assert Spec('mpich').satisfies(Spec('mpi'))
assert Spec('mpich2').satisfies(Spec('mpi'))
assert Spec('zmpi').satisfies(Spec('mpi'))
def test_satisfies_virtual_dep_with_virtual_constraint(self):
"""Ensure we can satisfy virtual constraints when there are multiple
vdep providers in the specs."""
assert Spec('netlib-lapack ^openblas').satisfies(
'netlib-lapack ^openblas'
)
assert not Spec('netlib-lapack ^netlib-blas').satisfies(
'netlib-lapack ^openblas'
)
assert not Spec('netlib-lapack ^openblas').satisfies(
'netlib-lapack ^netlib-blas'
)
assert Spec('netlib-lapack ^netlib-blas').satisfies(
'netlib-lapack ^netlib-blas'
)
def test_satisfies_same_spec_with_different_hash(self):
"""Ensure that concrete specs are matched *exactly* by hash."""
s1 = Spec('mpileaks').concretized()
s2 = s1.copy()
assert s1.satisfies(s2)
assert s2.satisfies(s1)
# Simulate specs that were installed before and after a change to
# Spack's hashing algorithm. This just reverses s2's hash.
s2._hash = s1.dag_hash()[-1::-1]
assert not s1.satisfies(s2)
assert not s2.satisfies(s1)
# ========================================================================
# Indexing specs
# ========================================================================
def test_self_index(self):
s = Spec('callpath')
assert s['callpath'] == s
def test_dep_index(self):
s = Spec('callpath')
s.normalize()
assert s['callpath'] == s
assert type(s['dyninst']) == Spec
assert type(s['libdwarf']) == Spec
assert type(s['libelf']) == Spec
assert type(s['mpi']) == Spec
assert s['dyninst'].name == 'dyninst'
assert s['libdwarf'].name == 'libdwarf'
assert s['libelf'].name == 'libelf'
assert s['mpi'].name == 'mpi'
def test_spec_contains_deps(self):
s = Spec('callpath')
s.normalize()
assert 'dyninst' in s
assert 'libdwarf' in s
assert 'libelf' in s
assert 'mpi' in s
@pytest.mark.usefixtures('config')
def test_virtual_index(self):
s = Spec('callpath')
s.concretize()
s_mpich = Spec('callpath ^mpich')
s_mpich.concretize()
s_mpich2 = Spec('callpath ^mpich2')
s_mpich2.concretize()
s_zmpi = Spec('callpath ^zmpi')
s_zmpi.concretize()
assert s['mpi'].name != 'mpi'
assert s_mpich['mpi'].name == 'mpich'
assert s_mpich2['mpi'].name == 'mpich2'
assert s_zmpi['zmpi'].name == 'zmpi'
for spec in [s, s_mpich, s_mpich2, s_zmpi]:
assert 'mpi' in spec
# ========================================================================
# Constraints
# ========================================================================
def test_constrain_variants(self):
check_constrain('libelf@2.1:2.5', 'libelf@0:2.5', 'libelf@2.1:3')
check_constrain(
'libelf@2.1:2.5%gcc@4.5:4.6',
'libelf@0:2.5%gcc@2:4.6',
'libelf@2.1:3%gcc@4.5:4.7'
)
check_constrain('libelf+debug+foo', 'libelf+debug', 'libelf+foo')
check_constrain(
'libelf+debug+foo', 'libelf+debug', 'libelf+debug+foo'
)
check_constrain(
'libelf debug=2 foo=1', 'libelf debug=2', 'libelf foo=1'
)
check_constrain(
'libelf debug=2 foo=1', 'libelf debug=2', 'libelf debug=2 foo=1'
)
check_constrain('libelf+debug~foo', 'libelf+debug', 'libelf~foo')
check_constrain(
'libelf+debug~foo', 'libelf+debug', 'libelf+debug~foo'
)
def test_constrain_multi_value_variant(self):
check_constrain(
'multivalue_variant foo="bar,baz"',
'multivalue_variant foo="bar"',
'multivalue_variant foo="baz"'
)
check_constrain(
'multivalue_variant foo="bar,baz,barbaz"',
'multivalue_variant foo="bar,barbaz"',
'multivalue_variant foo="baz"'
)
def test_constrain_compiler_flags(self):
check_constrain(
'libelf cflags="-O3" cppflags="-Wall"',
'libelf cflags="-O3"',
'libelf cppflags="-Wall"'
)
check_constrain(
'libelf cflags="-O3" cppflags="-Wall"',
'libelf cflags="-O3"',
'libelf cflags="-O3" cppflags="-Wall"'
)
def test_constrain_architecture(self):
check_constrain(
'libelf target=default_target os=default_os',
'libelf target=default_target os=default_os',
'libelf target=default_target os=default_os'
)
check_constrain(
'libelf target=default_target os=default_os',
'libelf',
'libelf target=default_target os=default_os'
)
def test_constrain_compiler(self):
check_constrain(
'libelf %gcc@4.4.7', 'libelf %gcc@4.4.7', 'libelf %gcc@4.4.7'
)
check_constrain(
'libelf %gcc@4.4.7', 'libelf', 'libelf %gcc@4.4.7'
)
def test_invalid_constraint(self):
check_invalid_constraint('libelf@0:2.0', 'libelf@2.1:3')
check_invalid_constraint(
'libelf@0:2.5%gcc@4.8:4.9', 'libelf@2.1:3%gcc@4.5:4.7')
check_invalid_constraint('libelf+debug', 'libelf~debug')
check_invalid_constraint('libelf+debug~foo', 'libelf+debug+foo')
check_invalid_constraint('libelf debug=True', 'libelf debug=False')
check_invalid_constraint(
'libelf cppflags="-O3"', 'libelf cppflags="-O2"')
check_invalid_constraint(
'libelf platform=test target=be os=be', 'libelf target=fe os=fe'
)
def test_constrain_changed(self):
check_constrain_changed('libelf', '@1.0')
check_constrain_changed('libelf', '@1.0:5.0')
check_constrain_changed('libelf', '%gcc')
check_constrain_changed('libelf%gcc', '%gcc@4.5')
check_constrain_changed('libelf', '+debug')
check_constrain_changed('libelf', '~debug')
check_constrain_changed('libelf', 'debug=2')
check_constrain_changed('libelf', 'cppflags="-O3"')
platform = spack.architecture.platform()
check_constrain_changed(
'libelf', 'target=' + platform.target('default_target').name)
check_constrain_changed(
'libelf', 'os=' + platform.operating_system('default_os').name)
def test_constrain_not_changed(self):
check_constrain_not_changed('libelf', 'libelf')
check_constrain_not_changed('libelf@1.0', '@1.0')
check_constrain_not_changed('libelf@1.0:5.0', '@1.0:5.0')
check_constrain_not_changed('libelf%gcc', '%gcc')
check_constrain_not_changed('libelf%gcc@4.5', '%gcc@4.5')
check_constrain_not_changed('libelf+debug', '+debug')
check_constrain_not_changed('libelf~debug', '~debug')
check_constrain_not_changed('libelf debug=2', 'debug=2')
check_constrain_not_changed(
'libelf cppflags="-O3"', 'cppflags="-O3"')
platform = spack.architecture.platform()
default_target = platform.target('default_target').name
check_constrain_not_changed(
'libelf target=' + default_target, 'target=' + default_target)
def test_constrain_dependency_changed(self):
check_constrain_changed('libelf^foo', 'libelf^foo@1.0')
check_constrain_changed('libelf^foo', 'libelf^foo@1.0:5.0')
check_constrain_changed('libelf^foo', 'libelf^foo%gcc')
check_constrain_changed('libelf^foo%gcc', 'libelf^foo%gcc@4.5')
check_constrain_changed('libelf^foo', 'libelf^foo+debug')
check_constrain_changed('libelf^foo', 'libelf^foo~debug')
platform = spack.architecture.platform()
default_target = platform.target('default_target').name
check_constrain_changed(
'libelf^foo', 'libelf^foo target=' + default_target)
def test_constrain_dependency_not_changed(self):
check_constrain_not_changed('libelf^foo@1.0', 'libelf^foo@1.0')
check_constrain_not_changed(
'libelf^foo@1.0:5.0', 'libelf^foo@1.0:5.0')
check_constrain_not_changed('libelf^foo%gcc', 'libelf^foo%gcc')
check_constrain_not_changed(
'libelf^foo%gcc@4.5', 'libelf^foo%gcc@4.5')
check_constrain_not_changed(
'libelf^foo+debug', 'libelf^foo+debug')
check_constrain_not_changed(
'libelf^foo~debug', 'libelf^foo~debug')
check_constrain_not_changed(
'libelf^foo cppflags="-O3"', 'libelf^foo cppflags="-O3"')
platform = spack.architecture.platform()
default_target = platform.target('default_target').name
check_constrain_not_changed(
'libelf^foo target=' + default_target,
'libelf^foo target=' + default_target)
def test_exceptional_paths_for_constructor(self):
with pytest.raises(TypeError):
Spec((1, 2))
with pytest.raises(ValueError):
Spec('')
with pytest.raises(ValueError):
Spec('libelf foo') | unknown | codeparrot/codeparrot-clean | ||
import content from "./loader!!";
it("should compile", () => {
expect(typeof content).toBe("string");
expect(content.startsWith("webpack://")).toBe(true);
}); | javascript | github | https://github.com/webpack/webpack | test/cases/loaders/import-module/index.js |
from sympy.matrices.expressions.slice import MatrixSlice
from sympy.matrices.expressions import MatrixSymbol
from sympy.abc import a, b, c, d, k, l, m, n
from sympy.utilities.pytest import raises, XFAIL
from sympy.functions.elementary.integers import floor
from sympy.assumptions import assuming, Q
X = MatrixSymbol('X', n, m)
Y = MatrixSymbol('Y', m, k)
def test_shape():
B = MatrixSlice(X, (a, b), (c, d))
assert B.shape == (b - a, d - c)
def test_entry():
B = MatrixSlice(X, (a, b), (c, d))
assert B[0,0] == X[a, c]
assert B[k,l] == X[a+k, c+l]
raises(IndexError, lambda : MatrixSlice(X, 1, (2, 5))[1, 0])
assert X[1::2, :][1, 3] == X[1+2, 3]
assert X[:, 1::2][3, 1] == X[3, 1+2]
def test_on_diag():
assert not MatrixSlice(X, (a, b), (c, d)).on_diag
assert MatrixSlice(X, (a, b), (a, b)).on_diag
def test_inputs():
assert MatrixSlice(X, 1, (2, 5)) == MatrixSlice(X, (1, 2), (2, 5))
assert MatrixSlice(X, 1, (2, 5)).shape == (1, 3)
def test_slicing():
assert X[1:5, 2:4] == MatrixSlice(X, (1, 5), (2, 4))
assert X[1, 2:4] == MatrixSlice(X, 1, (2, 4))
assert X[1:5, :].shape == (4, X.shape[1])
assert X[:, 1:5].shape == (X.shape[0], 4)
assert X[::2, ::2].shape == (floor(n/2), floor(m/2))
assert X[2, :] == MatrixSlice(X, 2, (0, m))
assert X[k, :] == MatrixSlice(X, k, (0, m))
def test_exceptions():
X = MatrixSymbol('x', 10, 20)
raises(IndexError, lambda: X[0:12, 2])
raises(IndexError, lambda: X[0:9, 22])
raises(IndexError, lambda: X[-1:5, 2])
@XFAIL
def test_symmetry():
X = MatrixSymbol('x', 10, 10)
Y = X[:5, 5:]
with assuming(Q.symmetric(X)):
assert Y.T == X[5:, :5]
def test_slice_of_slice():
X = MatrixSymbol('x', 10, 10)
assert X[2, :][:, 3][0, 0] == X[2, 3]
assert X[:5, :5][:4, :4] == X[:4, :4]
assert X[1:5, 2:6][1:3, 2] == X[2:4, 4]
assert X[1:9:2, 2:6][1:3, 2] == X[3:7:2, 4]
def test_negative_index():
X = MatrixSymbol('x', 10, 10)
assert X[-1, :] == X[9, :] | unknown | codeparrot/codeparrot-clean | ||
# -*- coding: utf-8 -*-
#
# Copyright (c) 2017 F5 Networks Inc.
# GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import json
import sys
from nose.plugins.skip import SkipTest
if sys.version_info < (2, 7):
raise SkipTest("F5 Ansible modules require Python >= 2.7")
from ansible.compat.tests import unittest
from ansible.compat.tests.mock import Mock
from ansible.compat.tests.mock import patch
from ansible.module_utils.f5_utils import AnsibleF5Client
try:
from library.bigip_device_ntp import Parameters
from library.bigip_device_ntp import ModuleManager
from library.bigip_device_ntp import ArgumentSpec
from ansible.module_utils.f5_utils import iControlUnexpectedHTTPError
from test.unit.modules.utils import set_module_args
except ImportError:
try:
from ansible.modules.network.f5.bigip_device_ntp import Parameters
from ansible.modules.network.f5.bigip_device_ntp import ModuleManager
from ansible.modules.network.f5.bigip_device_ntp import ArgumentSpec
from ansible.module_utils.f5_utils import iControlUnexpectedHTTPError
from units.modules.utils import set_module_args
except ImportError:
raise SkipTest("F5 Ansible modules require the f5-sdk Python library")
fixture_path = os.path.join(os.path.dirname(__file__), 'fixtures')
fixture_data = {}
def load_fixture(name):
path = os.path.join(fixture_path, name)
if path in fixture_data:
return fixture_data[path]
with open(path) as f:
data = f.read()
try:
data = json.loads(data)
except Exception:
pass
fixture_data[path] = data
return data
class TestParameters(unittest.TestCase):
def test_module_parameters(self):
ntp = ['192.168.1.1', '192.168.1.2']
args = dict(
ntp_servers=ntp,
timezone='Arctic/Longyearbyen'
)
p = Parameters(args)
assert p.ntp_servers == ntp
assert p.timezone == 'Arctic/Longyearbyen'
def test_api_parameters(self):
ntp = ['192.168.1.1', '192.168.1.2']
args = dict(
servers=ntp,
timezone='Arctic/Longyearbyen'
)
p = Parameters(args)
assert p.ntp_servers == ntp
assert p.timezone == 'Arctic/Longyearbyen'
@patch('ansible.module_utils.f5_utils.AnsibleF5Client._get_mgmt_root',
return_value=True)
class TestModuleManager(unittest.TestCase):
def setUp(self):
self.spec = ArgumentSpec()
def test_update_ntp_servers(self, *args):
ntp = ['10.1.1.1', '10.1.1.2']
set_module_args(
dict(
ntp_servers=ntp,
server='localhost',
user='admin',
password='password'
)
)
# Configure the parameters that would be returned by querying the
# remote device
current = Parameters(
load_fixture('load_ntp.json')
)
client = AnsibleF5Client(
argument_spec=self.spec.argument_spec,
supports_check_mode=self.spec.supports_check_mode,
f5_product_name=self.spec.f5_product_name
)
mm = ModuleManager(client)
# Override methods to force specific logic in the module to happen
mm.update_on_device = Mock(return_value=True)
mm.read_current_from_device = Mock(return_value=current)
results = mm.exec_module()
assert results['changed'] is True
assert results['ntp_servers'] == ntp
def test_update_timezone(self, *args):
set_module_args(
dict(
timezone='Arctic/Longyearbyen',
server='localhost',
user='admin',
password='password'
)
)
# Configure the parameters that would be returned by querying the
# remote device
current = Parameters(
load_fixture('load_ntp.json')
)
client = AnsibleF5Client(
argument_spec=self.spec.argument_spec,
supports_check_mode=self.spec.supports_check_mode,
f5_product_name=self.spec.f5_product_name
)
mm = ModuleManager(client)
# Override methods to force specific logic in the module to happen
mm.update_on_device = Mock(return_value=True)
mm.read_current_from_device = Mock(return_value=current)
results = mm.exec_module()
assert results['changed'] is True
assert results['timezone'] == 'Arctic/Longyearbyen'
def test_update_ntp_servers_and_timezone(self, *args):
ntp = ['10.1.1.1', '10.1.1.2']
set_module_args(
dict(
ntp_servers=ntp,
timezone='Arctic/Longyearbyen',
server='localhost',
user='admin',
password='password'
)
)
# Configure the parameters that would be returned by querying the
# remote device
current = Parameters(
load_fixture('load_ntp.json')
)
client = AnsibleF5Client(
argument_spec=self.spec.argument_spec,
supports_check_mode=self.spec.supports_check_mode,
f5_product_name=self.spec.f5_product_name
)
mm = ModuleManager(client)
# Override methods to force specific logic in the module to happen
mm.update_on_device = Mock(return_value=True)
mm.read_current_from_device = Mock(return_value=current)
results = mm.exec_module()
assert results['changed'] is True
assert results['ntp_servers'] == ntp
assert results['timezone'] == 'Arctic/Longyearbyen'
def test_absent_ntp_servers(self, *args):
ntp = []
set_module_args(
dict(
ntp_servers=ntp,
timezone='America/Los_Angeles',
server='localhost',
user='admin',
password='password',
state='absent'
)
)
# Configure the parameters that would be returned by querying the
# remote device
current = Parameters(
load_fixture('load_ntp.json')
)
client = AnsibleF5Client(
argument_spec=self.spec.argument_spec,
supports_check_mode=self.spec.supports_check_mode,
f5_product_name=self.spec.f5_product_name
)
mm = ModuleManager(client)
# Override methods to force specific logic in the module to happen
mm.absent_on_device = Mock(return_value=True)
mm.read_current_from_device = Mock(return_value=current)
results = mm.exec_module()
assert results['changed'] is True
assert results['ntp_servers'] == ntp
assert 'timezone' not in results
def test_absent_timezone(self, *args):
set_module_args(
dict(
timezone='',
server='localhost',
user='admin',
password='password',
state='absent'
)
)
# Configure the parameters that would be returned by querying the
# remote device
current = Parameters(
load_fixture('load_ntp.json')
)
client = AnsibleF5Client(
argument_spec=self.spec.argument_spec,
supports_check_mode=self.spec.supports_check_mode,
f5_product_name=self.spec.f5_product_name
)
mm = ModuleManager(client)
# Override methods to force specific logic in the module to happen
mm.absent_on_device = Mock(return_value=True)
mm.read_current_from_device = Mock(return_value=current)
results = mm.exec_module()
assert results['changed'] is False | unknown | codeparrot/codeparrot-clean | ||
from .. import Provider as PersonProvider
class Provider(PersonProvider):
formats = (
'{{first_name}} {{last_name}}',
'{{first_name}} {{last_name}}',
'{{first_name}} {{last_name}}',
'{{first_name}} {{last_name}}',
'{{first_name}} {{last_name}}',
'{{first_name}} {{last_name}}',
'{{first_name}} {{last_name}}',
'{{first_name}} {{last_name}}',
'{{first_name}} {{last_name}}',
'{{first_name}} {{last_name}}',
'{{first_name}} {{last_name}}',
'{{first_name}} {{last_name}}',
'{{first_name}} {{last_name}}',
'{{first_name}} {{last_name}}',
'{{first_name}} {{last_name}}',
'{{first_name}} {{last_name}}',
'{{first_name_male}}-{{first_name_male}} {{last_name}}',
'{{first_name_male}}-{{first_name_male}} {{last_name}}',
'{{first_name_female}}-{{first_name_female}} {{last_name}}',
'{{first_name_female}}-{{first_name_female}} {{last_name}}',
'{{first_name}} {{last_name}}-{{last_name}}',
'{{first_name}} {{last_name}}-{{last_name}}',
'{{prefix}} {{first_name_male}} {{last_name}}',
)
# 100 most common male first names, alphabetically.
# Source: http://www.ssb.no/a/navn/fornavn-menn-100.html
first_names_male = (
'Adrian',
'Alexander',
'Alf',
'Anders',
'Andreas',
'Arild',
'Arne',
'Asbjørn',
'Bjørn',
'Christian',
'Dag',
'Daniel',
'Egil',
'Einar',
'Eirik',
'Eivind',
'Emil',
'Erik',
'Erling',
'Espen',
'Finn',
'Frank',
'Fredrik',
'Frode',
'Geir',
'Gunnar',
'Hans',
'Harald',
'Helge',
'Henrik',
'Håkon',
'Håvard',
'Ivar',
'Jan',
'Jens',
'Joakim',
'Johannes',
'Johan',
'John',
'Jonas',
'Jon',
'Jørgen',
'Karl',
'Kenneth',
'Kim',
'Kjell',
'Kjetil',
'Knut',
'Kåre',
'Kristian',
'Kristoffer',
'Lars',
'Leif',
'Magne',
'Magnus',
'Marius',
'Markus',
'Martin',
'Mathias',
'Morten',
'Nils',
'Odd',
'Ola',
'Olav',
'Ole',
'Per',
'Petter',
'Pål',
'Roar',
'Robert',
'Roger',
'Rolf',
'Roy',
'Rune',
'Sander',
'Sebastian',
'Sigurd',
'Simen',
'Sindre',
'Sondre',
'Steinar',
'Stein',
'Stian',
'Stig',
'Svein',
'Sverre',
'Terje',
'Thomas',
'Thor',
'Tobias',
'Tommy',
'Tom',
'Torbjørn',
'Tore',
'Tor',
'Trond',
'Vegard',
'Vidar',
'Øystein',
'Øyvind',
)
# 100 most common female first names, alphabetically.
# Source: http://www.ssb.no/a/navn/fornavn-kvinner-100.html
first_names_female = (
'Andrea',
'Anette',
'Anita',
'Anna',
'Anne',
'Ann',
'Astrid',
'Aud',
'Bente',
'Berit',
'Bjørg',
'Britt',
'Camilla',
'Cathrine',
'Cecilie',
'Elin',
'Elisabeth',
'Elise',
'Eli',
'Ellen',
'Else',
'Emilie',
'Emma',
'Eva',
'Gerd',
'Grete',
'Grethe',
'Gro',
'Gunn',
'Hanna',
'Hanne',
'Hege',
'Heidi',
'Helene',
'Hilde',
'Ida',
'Ingeborg',
'Inger',
'Ingrid',
'Irene',
'Janne',
'Jenny',
'Jorunn',
'Julie',
'Karen',
'Karin',
'Kari',
'Karoline',
'Kirsten',
'Kjersti',
'Kristine',
'Kristin',
'Laila',
'Lene',
'Linda',
'Line',
'Linn',
'Lise',
'Liv',
'Malin',
'Maren',
'Marianne',
'Maria',
'Marie',
'Mari',
'Marit',
'Marte',
'Martine',
'May',
'Mette',
'Mona',
'Monica',
'Nina',
'Nora',
'Ragnhild',
'Randi',
'Reidun',
'Rita',
'Ruth',
'Sara',
'Sigrid',
'Silje',
'Siri',
'Sissel',
'Siv',
'Sofie',
'Solveig',
'Stine',
'Synnøve',
'Thea',
'Tone',
'Tonje',
'Torill',
'Tove',
'Trine',
'Turid',
'Unni',
'Vilde',
'Wenche',
'Åse',
)
first_names = first_names_male + first_names_female
# 100 most common last names, alphabetically.
# Source: http://www.ssb.no/a/navn/alf/etter100.html
last_names = (
'Aasen',
'Aas',
'Abrahamsen',
'Ahmed',
'Ali',
'Amundsen',
'Andersen',
'Andreassen',
'Andresen',
'Antonsen',
'Arnesen',
'Aune',
'Bakken',
'Bakke',
'Berge',
'Berg',
'Berntsen',
'Bøe',
'Birkeland',
'Brekke',
'Christensen',
'Dahl',
'Danielsen',
'Edvardsen',
'Eide',
'Eliassen',
'Ellingsen',
'Engen',
'Eriksen',
'Evensen',
'Fredriksen',
'Gulbrandsen',
'Gundersen',
'Hagen',
'Halvorsen',
'Hansen',
'Hanssen',
'Haugen',
'Hauge',
'Haugland',
'Haug',
'Helland',
'Henriksen',
'Holm',
'Isaksen',
'Iversen',
'Jacobsen',
'Jakobsen',
'Jensen',
'Jenssen',
'Johannessen',
'Johansen',
'Johnsen',
'Jørgensen',
'Karlsen',
'Knudsen',
'Knutsen',
'Kristensen',
'Kristiansen',
'Kristoffersen',
'Larsen',
'Lien',
'Lie',
'Lunde',
'Lund',
'Madsen',
'Martinsen',
'Mathisen',
'Mikkelsen',
'Moen',
'Moe',
'Myhre',
'Myklebust',
'Nguyen',
'Nielsen',
'Nilsen',
'Næss',
'Nygård',
'Olsen',
'Paulsen',
'Pedersen',
'Pettersen',
'Rasmussen',
'Rønning',
'Ruud',
'Sandvik',
'Simonsen',
'Sivertsen',
'Solberg',
'Solheim',
'Sørensen',
'Sæther',
'Strand',
'Strøm',
'Svendsen',
'Tangen',
'Thomassen',
'Thorsen',
'Tveit',
'Vik',
'Ødegård',
)
prefixes = (
'Dr.', 'Prof.',
) | unknown | codeparrot/codeparrot-clean | ||
# Copyright 2016-2017 Capital One Services, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function, unicode_literals
from unittest import TestCase
from c7n.logs_support import _timestamp_from_string
from c7n.ufuncs import logsub
class TestLogsub(TestCase):
def setUp(self):
logsub.config = {"test": "data"}
def test_message_event(self):
event = {
"message": "This is a test",
"timestamp": _timestamp_from_string("Fri Feb 13 18:31:31 2009"),
}
msg = logsub.message_event(event)
self.assertEqual(msg, "Fri Feb 13 18:31:31 2009: This is a test") | unknown | codeparrot/codeparrot-clean | ||
#!/usr/bin/env python
#===----------------------------------------------------------------------===##
#
# Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
# See https://llvm.org/LICENSE.txt for license information.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
#
#===----------------------------------------------------------------------===##
from argparse import ArgumentParser
from ctypes.util import find_library
import distutils.spawn
import glob
import tempfile
import os
import shutil
import subprocess
import signal
import sys
temp_directory_root = None
def exit_with_cleanups(status):
if temp_directory_root is not None:
shutil.rmtree(temp_directory_root)
sys.exit(status)
def print_and_exit(msg):
sys.stderr.write(msg + '\n')
exit_with_cleanups(1)
def find_and_diagnose_missing(lib, search_paths):
if os.path.exists(lib):
return os.path.abspath(lib)
if not lib.startswith('lib') or not lib.endswith('.a'):
print_and_exit(("input file '%s' not not name a static library. "
"It should start with 'lib' and end with '.a") % lib)
for sp in search_paths:
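# each -L search path arrives as a single-element list (action='append', nargs=1)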
assert type(sp) is list and len(sp) == 1
path = os.path.join(sp[0], lib)
if os.path.exists(path):
return os.path.abspath(path)
print_and_exit("input '%s' does not exist" % lib)
def execute_command(cmd, cwd=None):
"""
Execute a command, capture and return its output.
"""
kwargs = {
'stdin': subprocess.PIPE,
'stdout': subprocess.PIPE,
'stderr': subprocess.PIPE,
'cwd': cwd
}
p = subprocess.Popen(cmd, **kwargs)
out, err = p.communicate()
exitCode = p.wait()
if exitCode == -signal.SIGINT:
raise KeyboardInterrupt
return out, err, exitCode
def execute_command_verbose(cmd, cwd=None, verbose=False):
"""
Execute a command and print its output on failure.
"""
out, err, exitCode = execute_command(cmd, cwd=cwd)
if exitCode != 0 or verbose:
report = "Command: %s\n" % ' '.join(["'%s'" % a for a in cmd])
if exitCode != 0:
report += "Exit Code: %d\n" % exitCode
if out:
report += "Standard Output:\n--\n%s--" % out
if err:
report += "Standard Error:\n--\n%s--" % err
if exitCode != 0:
report += "\n\nFailed!"
sys.stderr.write('%s\n' % report)
if exitCode != 0:
exit_with_cleanups(exitCode)
return out
def main():
parser = ArgumentParser(
description="Merge multiple archives into a single library")
parser.add_argument(
'-v', '--verbose', dest='verbose', action='store_true', default=False)
parser.add_argument(
'-o', '--output', dest='output', required=True,
help='The output file. stdout is used if not given',
type=str, action='store')
parser.add_argument(
'-L', dest='search_paths',
help='Paths to search for the libraries along', action='append',
nargs=1)
parser.add_argument(
'--ar', dest='ar_exe', required=False,
help='The ar executable to use, finds \'ar\' in the path if not given',
type=str, action='store')
parser.add_argument(
'--use-libtool', dest='use_libtool', action='store_true', default=False)
parser.add_argument(
'--libtool', dest='libtool_exe', required=False,
help='The libtool executable to use, finds \'libtool\' in the path if not given',
type=str, action='store')
parser.add_argument(
'archives', metavar='archives', nargs='+',
help='The archives to merge')
args = parser.parse_args()
ar_exe = args.ar_exe
if not ar_exe:
ar_exe = distutils.spawn.find_executable('ar')
if not ar_exe:
print_and_exit("failed to find 'ar' executable")
if args.use_libtool:
libtool_exe = args.libtool_exe
if not libtool_exe:
libtool_exe = distutils.spawn.find_executable('libtool')
if not libtool_exe:
print_and_exit("failed to find 'libtool' executable")
if len(args.archives) < 2:
print_and_exit('fewer than 2 inputs provided')
archives = [find_and_diagnose_missing(ar, args.search_paths)
for ar in args.archives]
print ('Merging archives: %s' % archives)
if not os.path.exists(os.path.dirname(args.output)):
print_and_exit("output path doesn't exist: '%s'" % args.output)
global temp_directory_root
temp_directory_root = tempfile.mkdtemp('.libcxx.merge.archives')
files = []
for arc in archives:
execute_command_verbose([ar_exe, 'x', arc],
cwd=temp_directory_root, verbose=args.verbose)
out = execute_command_verbose([ar_exe, 't', arc])
files.extend(out.splitlines())
if args.use_libtool:
files = [f for f in files if not f.startswith('__.SYMDEF')]
execute_command_verbose([libtool_exe, '-static', '-o', args.output] + files,
cwd=temp_directory_root, verbose=args.verbose)
else:
execute_command_verbose([ar_exe, 'rcs', args.output] + files,
cwd=temp_directory_root, verbose=args.verbose)
if __name__ == '__main__':
main()
exit_with_cleanups(0) | unknown | codeparrot/codeparrot-clean | ||
---
c: Copyright (C) Daniel Stenberg, <daniel@haxx.se>, et al.
SPDX-License-Identifier: curl
Title: CURLINFO_TLS_SSL_PTR
Section: 3
Source: libcurl
See-also:
- CURLINFO_TLS_SESSION (3)
- curl_easy_getinfo (3)
- curl_easy_setopt (3)
Protocol:
- TLS
TLS-backend:
- GnuTLS
- mbedTLS
- OpenSSL
- Schannel
- wolfSSL
Added-in: 7.48.0
---
# NAME
CURLINFO_TLS_SSL_PTR - TLS session info
# SYNOPSIS
~~~c
#include <curl/curl.h>
CURLcode curl_easy_getinfo(CURL *handle, CURLINFO_TLS_SSL_PTR,
struct curl_tlssessioninfo **session);
~~~
# DESCRIPTION
Pass a pointer to a *struct curl_tlssessioninfo **. The pointer is initialized
to refer to a *struct curl_tlssessioninfo ** that contains an enum indicating
the SSL library used for the handshake and a pointer to the respective
internal TLS session structure of this underlying SSL library.
This option may be useful for example to extract certificate information in a
format convenient for further processing, such as manual validation. Refer to
the **LIMITATIONS** section.
~~~c
struct curl_tlssessioninfo {
curl_sslbackend backend;
void *internals;
};
~~~
The *backend* struct member is one of these defines: CURLSSLBACKEND_NONE (when
built without TLS support), CURLSSLBACKEND_WOLFSSL,
CURLSSLBACKEND_SECURETRANSPORT, CURLSSLBACKEND_GNUTLS, CURLSSLBACKEND_MBEDTLS,
CURLSSLBACKEND_NSS, CURLSSLBACKEND_OPENSSL or CURLSSLBACKEND_SCHANNEL. (Note
that the OpenSSL forks are all reported as just OpenSSL here.)
The *internals* struct member points to a TLS library specific pointer for
the active ("in use") SSL connection, with the following underlying types:
## GnuTLS
**gnutls_session_t**
## OpenSSL
CURLINFO_TLS_SESSION(3): **SSL_CTX ***
CURLINFO_TLS_SSL_PTR(3): **SSL ***
## mbedTLS
**mbedTLS_ssl_context ***
## Secure Channel
**CtxtHandle ***
## wolfSSL
**SSL ***
##
If the *internals* pointer is NULL then either the SSL backend is not
supported, an SSL session has not yet been established or the connection is no
longer associated with the easy handle (e.g. curl_easy_perform(3) has
returned).
# LIMITATIONS
This option has some limitations that could make it unsafe when it comes to
the manual verification of certificates.
This option only retrieves the first in-use SSL session pointer for your easy
handle, however your easy handle may have more than one in-use SSL session if
using FTP over SSL. That is because the FTP protocol has a control channel and
a data channel and one or both may be over SSL. Currently there is no way to
retrieve a second in-use SSL session associated with an easy handle.
This option has not been thoroughly tested with clear text protocols that can
be upgraded/downgraded to/from SSL: FTP, SMTP, POP3, IMAP when used with
CURLOPT_USE_SSL(3). Though you can retrieve the SSL pointer, it is possible
that before you can do that, data (including auth) may have already been sent
over a connection after it was upgraded.
Renegotiation. If your SSL library allows unsafe renegotiation, or
renegotiation in which the certificate is allowed to change, then the
certificate may change and data may continue to be sent or received after
renegotiation but before you are able to get the (possibly) changed SSL
pointer, with the (possibly) changed certificate information.
Instead of using this option to poll for certificate changes use
CURLOPT_SSL_CTX_FUNCTION(3) to set a verification callback, if supported.
That is safer and does not suffer from any of the problems above.
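As an illustration only, a minimal sketch of that callback approach (assuming
the OpenSSL backend; the *sslctx_cb* name and the verification policy shown
are placeholders, not part of libcurl) might look like this:
~~~c
#include <curl/curl.h>
#include <openssl/ssl.h>
/* Hypothetical callback: libcurl hands over the backend's SSL_CTX before the
   handshake, so verification is configured up front instead of polled later */
static CURLcode sslctx_cb(CURL *handle, void *sslctx, void *clientp)
{
  (void)handle;
  (void)clientp;
  SSL_CTX_set_verify((SSL_CTX *)sslctx, SSL_VERIFY_PEER, NULL);
  return CURLE_OK;
}
int main(void)
{
  CURL *curl = curl_easy_init();
  if(curl) {
    curl_easy_setopt(curl, CURLOPT_URL, "https://example.com");
    curl_easy_setopt(curl, CURLOPT_SSL_CTX_FUNCTION, sslctx_cb);
    curl_easy_perform(curl);
    curl_easy_cleanup(curl);
  }
  return 0;
}
~~~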
How are you using this option? Are you affected by any of these limitations?
Please let us know by making a comment at
https://github.com/curl/curl/issues/685
# %PROTOCOLS%
# EXAMPLE
~~~c
#include <curl/curl.h>
#include <openssl/ssl.h>
CURL *curl;
static size_t wf(void *ptr, size_t size, size_t nmemb, void *stream)
{
const struct curl_tlssessioninfo *info = NULL;
CURLcode result = curl_easy_getinfo(curl, CURLINFO_TLS_SSL_PTR, &info);
if(info && !result) {
if(CURLSSLBACKEND_OPENSSL == info->backend) {
printf("OpenSSL ver. %s\n", SSL_get_version((SSL*)info->internals));
}
}
return size * nmemb;
}
int main(int argc, char **argv)
{
CURLcode result = CURLE_OK;
curl = curl_easy_init();
if(curl) {
curl_easy_setopt(curl, CURLOPT_URL, "https://example.com");
curl_easy_setopt(curl, CURLOPT_WRITEFUNCTION, wf);
result = curl_easy_perform(curl);
curl_easy_cleanup(curl);
}
return result;
}
~~~
# HISTORY
This option supersedes CURLINFO_TLS_SESSION(3) which was added in 7.34.0.
This option is exactly the same as that option except in the case of OpenSSL.
Non-OpenSSL support was added in 7.48.0.
# %AVAILABILITY%
# RETURN VALUE
curl_easy_getinfo(3) returns a CURLcode indicating success or error.
CURLE_OK (0) means everything was OK, non-zero means an error occurred, see
libcurl-errors(3). | unknown | github | https://github.com/curl/curl | docs/libcurl/opts/CURLINFO_TLS_SSL_PTR.md |
import os
import re
class FormatParser:
def __init__(self, fileName = None):
self.dictFormat = {}
self.pattern = re.compile(r'(field\d{1,2})')
self.__OpenFile(fileName)
def __OpenFile(self, fileName):
if fileName is None or not os.access(fileName, os.F_OK):
return
f = open(fileName)
while True:
line = f.readline()
if len(line) == 0:
# Zero length indicates EOF
break
# Ignore lines containing comments (line starts with # sign)
# and lines which seem not to be formatted correctly
if line.strip().startswith('#') or line.find(':') == -1:
continue
else:
splits = line.split(':', 1)
# Check whether the string contains RefDes or Quantity field number
element = splits[0].lower().strip()
if element == "refdes":
self.dictFormat["RefDes"] = splits[1].lower().strip()
continue
if element == "quantity":
self.dictFormat["Quantity"] = splits[1].lower().strip()
continue
listFormat = self.pattern.split(splits[1].strip())
# Remove empty elements from the format string. Build a filtered copy
# instead of removing items while iterating, which would skip entries.
listFormat = [each for each in listFormat if each]
self.dictFormat[element.upper()] = listFormat
f.close() | unknown | codeparrot/codeparrot-clean | ||
""" History related magics and functionality """
#-----------------------------------------------------------------------------
# Copyright (C) 2010-2011 The IPython Development Team.
#
# Distributed under the terms of the BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
from __future__ import print_function
# Stdlib imports
import atexit
import datetime
import os
import re
try:
import sqlite3
except ImportError:
try:
from pysqlite2 import dbapi2 as sqlite3
except ImportError:
sqlite3 = None
import threading
# Our own packages
from IPython.config.configurable import Configurable
from IPython.external.decorator import decorator
from IPython.utils.decorators import undoc
from IPython.utils.path import locate_profile
from IPython.utils import py3compat
from IPython.utils.traitlets import (
Any, Bool, Dict, Instance, Integer, List, Unicode, TraitError,
)
from IPython.utils.warn import warn
#-----------------------------------------------------------------------------
# Classes and functions
#-----------------------------------------------------------------------------
@undoc
class DummyDB(object):
"""Dummy DB that will act as a black hole for history.
Only used in the absence of sqlite"""
def execute(*args, **kwargs):
return []
def commit(self, *args, **kwargs):
pass
def __enter__(self, *args, **kwargs):
pass
def __exit__(self, *args, **kwargs):
pass
@decorator
def needs_sqlite(f, self, *a, **kw):
"""Decorator: return an empty list in the absence of sqlite."""
if sqlite3 is None or not self.enabled:
return []
else:
return f(self, *a, **kw)
if sqlite3 is not None:
DatabaseError = sqlite3.DatabaseError
else:
@undoc
class DatabaseError(Exception):
"Dummy exception when sqlite could not be imported. Should never occur."
@decorator
def catch_corrupt_db(f, self, *a, **kw):
"""A decorator which wraps HistoryAccessor method calls to catch errors from
a corrupt SQLite database, move the old database out of the way, and create
a new one.
"""
try:
return f(self, *a, **kw)
except DatabaseError:
if os.path.isfile(self.hist_file):
# Try to move the file out of the way
base,ext = os.path.splitext(self.hist_file)
newpath = base + '-corrupt' + ext
os.rename(self.hist_file, newpath)
self.init_db()
print("ERROR! History file wasn't a valid SQLite database.",
"It was moved to %s" % newpath, "and a new file created.")
return []
else:
# The hist_file is probably :memory: or something else.
raise
class HistoryAccessor(Configurable):
"""Access the history database without adding to it.
This is intended for use by standalone history tools. IPython shells use
HistoryManager, below, which is a subclass of this."""
# String holding the path to the history file
hist_file = Unicode(config=True,
help="""Path to file to use for SQLite history database.
By default, IPython will put the history database in the IPython
profile directory. If you would rather share one history among
profiles, you can set this value in each, so that they are consistent.
Due to an issue with fcntl, SQLite is known to misbehave on some NFS
mounts. If you see IPython hanging, try setting this to something on a
local disk, e.g::
ipython --HistoryManager.hist_file=/tmp/ipython_hist.sqlite
""")
enabled = Bool(True, config=True,
help="""enable the SQLite history
set enabled=False to disable the SQLite history,
in which case there will be no stored history, no SQLite connection,
and no background saving thread. This may be necessary in some
threaded environments where IPython is embedded.
"""
)
connection_options = Dict(config=True,
help="""Options for configuring the SQLite connection
These options are passed as keyword args to sqlite3.connect
when establishing database connections.
"""
)
# The SQLite database
db = Any()
def _db_changed(self, name, old, new):
"""validate the db, since it can be an Instance of two different types"""
connection_types = (DummyDB,)
if sqlite3 is not None:
connection_types = (DummyDB, sqlite3.Connection)
if not isinstance(new, connection_types):
msg = "%s.db must be sqlite3 Connection or DummyDB, not %r" % \
(self.__class__.__name__, new)
raise TraitError(msg)
def __init__(self, profile='default', hist_file=u'', **traits):
"""Create a new history accessor.
Parameters
----------
profile : str
The name of the profile from which to open history.
hist_file : str
Path to an SQLite history database stored by IPython. If specified,
hist_file overrides profile.
config : :class:`~IPython.config.loader.Config`
Config object. hist_file can also be set through this.
"""
# We need a pointer back to the shell for various tasks.
super(HistoryAccessor, self).__init__(**traits)
# defer setting hist_file from kwarg until after init,
# otherwise the default kwarg value would clobber any value
# set by config
if hist_file:
self.hist_file = hist_file
if self.hist_file == u'':
# No one has set the hist_file, yet.
self.hist_file = self._get_hist_file_name(profile)
if sqlite3 is None and self.enabled:
warn("IPython History requires SQLite, your history will not be saved")
self.enabled = False
self.init_db()
def _get_hist_file_name(self, profile='default'):
"""Find the history file for the given profile name.
This is overridden by the HistoryManager subclass, to use the shell's
active profile.
Parameters
----------
profile : str
The name of a profile which has a history file.
"""
return os.path.join(locate_profile(profile), 'history.sqlite')
@catch_corrupt_db
def init_db(self):
"""Connect to the database, and create tables if necessary."""
if not self.enabled:
self.db = DummyDB()
return
# use detect_types so that timestamps return datetime objects
kwargs = dict(detect_types=sqlite3.PARSE_DECLTYPES|sqlite3.PARSE_COLNAMES)
kwargs.update(self.connection_options)
self.db = sqlite3.connect(self.hist_file, **kwargs)
self.db.execute("""CREATE TABLE IF NOT EXISTS sessions (session integer
primary key autoincrement, start timestamp,
end timestamp, num_cmds integer, remark text)""")
self.db.execute("""CREATE TABLE IF NOT EXISTS history
(session integer, line integer, source text, source_raw text,
PRIMARY KEY (session, line))""")
# Output history is optional, but ensure the table's there so it can be
# enabled later.
self.db.execute("""CREATE TABLE IF NOT EXISTS output_history
(session integer, line integer, output text,
PRIMARY KEY (session, line))""")
self.db.commit()
def writeout_cache(self):
"""Overridden by HistoryManager to dump the cache before certain
database lookups."""
pass
## -------------------------------
## Methods for retrieving history:
## -------------------------------
def _run_sql(self, sql, params, raw=True, output=False):
"""Prepares and runs an SQL query for the history database.
Parameters
----------
sql : str
Any filtering expressions to go after SELECT ... FROM ...
params : tuple
Parameters passed to the SQL query (to replace "?")
raw, output : bool
See :meth:`get_range`
Returns
-------
Tuples as :meth:`get_range`
"""
toget = 'source_raw' if raw else 'source'
sqlfrom = "history"
if output:
sqlfrom = "history LEFT JOIN output_history USING (session, line)"
toget = "history.%s, output_history.output" % toget
cur = self.db.execute("SELECT session, line, %s FROM %s " %\
(toget, sqlfrom) + sql, params)
if output: # Regroup into 3-tuples
return ((ses, lin, (inp, out)) for ses, lin, inp, out in cur)
return cur
@needs_sqlite
@catch_corrupt_db
def get_session_info(self, session):
"""Get info about a session.
Parameters
----------
session : int
Session number to retrieve.
Returns
-------
session_id : int
Session ID number
start : datetime
Timestamp for the start of the session.
end : datetime
Timestamp for the end of the session, or None if IPython crashed.
num_cmds : int
Number of commands run, or None if IPython crashed.
remark : unicode
A manually set description.
"""
query = "SELECT * from sessions where session == ?"
return self.db.execute(query, (session,)).fetchone()
@catch_corrupt_db
def get_last_session_id(self):
"""Get the last session ID currently in the database.
Within IPython, this should be the same as the value stored in
:attr:`HistoryManager.session_number`.
"""
for record in self.get_tail(n=1, include_latest=True):
return record[0]
@catch_corrupt_db
def get_tail(self, n=10, raw=True, output=False, include_latest=False):
"""Get the last n lines from the history database.
Parameters
----------
n : int
The number of lines to get
raw, output : bool
See :meth:`get_range`
include_latest : bool
If False (default), n+1 lines are fetched, and the latest one
is discarded. This is intended to be used where the function
is called by a user command, which it should not return.
Returns
-------
Tuples as :meth:`get_range`
"""
self.writeout_cache()
if not include_latest:
n += 1
cur = self._run_sql("ORDER BY session DESC, line DESC LIMIT ?",
(n,), raw=raw, output=output)
if not include_latest:
return reversed(list(cur)[1:])
return reversed(list(cur))
@catch_corrupt_db
def search(self, pattern="*", raw=True, search_raw=True,
output=False, n=None, unique=False):
"""Search the database using unix glob-style matching (wildcards
* and ?).
Parameters
----------
pattern : str
The wildcarded pattern to match when searching
search_raw : bool
If True, search the raw input, otherwise, the parsed input
raw, output : bool
See :meth:`get_range`
n : None or int
If an integer is given, it defines the limit of
returned entries.
unique : bool
When it is true, return only unique entries.
Returns
-------
Tuples as :meth:`get_range`
"""
tosearch = "source_raw" if search_raw else "source"
if output:
tosearch = "history." + tosearch
self.writeout_cache()
sqlform = "WHERE %s GLOB ?" % tosearch
params = (pattern,)
if unique:
sqlform += ' GROUP BY {0}'.format(tosearch)
if n is not None:
sqlform += " ORDER BY session DESC, line DESC LIMIT ?"
params += (n,)
elif unique:
sqlform += " ORDER BY session, line"
cur = self._run_sql(sqlform, params, raw=raw, output=output)
if n is not None:
return reversed(list(cur))
return cur
@catch_corrupt_db
def get_range(self, session, start=1, stop=None, raw=True,output=False):
"""Retrieve input by session.
Parameters
----------
session : int
Session number to retrieve.
start : int
First line to retrieve.
stop : int
End of line range (excluded from output itself). If None, retrieve
to the end of the session.
raw : bool
If True, return untranslated input
output : bool
If True, attempt to include output. This will be 'real' Python
objects for the current session, or text reprs from previous
sessions if db_log_output was enabled at the time. Where no output
is found, None is used.
Returns
-------
entries
An iterator over the desired lines. Each line is a 3-tuple, either
(session, line, input) if output is False, or
(session, line, (input, output)) if output is True.
"""
if stop:
lineclause = "line >= ? AND line < ?"
params = (session, start, stop)
else:
lineclause = "line>=?"
params = (session, start)
return self._run_sql("WHERE session==? AND %s" % lineclause,
params, raw=raw, output=output)
def get_range_by_str(self, rangestr, raw=True, output=False):
"""Get lines of history from a string of ranges, as used by magic
commands %hist, %save, %macro, etc.
Parameters
----------
rangestr : str
A string specifying ranges, e.g. "5 ~2/1-4". See
:func:`magic_history` for full details.
raw, output : bool
As :meth:`get_range`
Returns
-------
Tuples as :meth:`get_range`
"""
for sess, s, e in extract_hist_ranges(rangestr):
for line in self.get_range(sess, s, e, raw=raw, output=output):
yield line
class HistoryManager(HistoryAccessor):
"""A class to organize all history-related functionality in one place.
"""
# Public interface
# An instance of the IPython shell we are attached to
shell = Instance('IPython.core.interactiveshell.InteractiveShellABC')
# Lists to hold processed and raw history. These start with a blank entry
# so that we can index them starting from 1
input_hist_parsed = List([""])
input_hist_raw = List([""])
# A list of directories visited during session
dir_hist = List()
def _dir_hist_default(self):
try:
return [py3compat.getcwd()]
except OSError:
return []
# A dict of output history, keyed with ints from the shell's
# execution count.
output_hist = Dict()
# The text/plain repr of outputs.
output_hist_reprs = Dict()
# The number of the current session in the history database
session_number = Integer()
db_log_output = Bool(False, config=True,
help="Should the history database include output? (default: no)"
)
db_cache_size = Integer(0, config=True,
help="Write to database every x commands (higher values save disk access & power).\n"
"Values of 1 or less effectively disable caching."
)
# The input and output caches
db_input_cache = List()
db_output_cache = List()
# History saving in separate thread
save_thread = Instance('IPython.core.history.HistorySavingThread')
try: # Event is a function returning an instance of _Event...
save_flag = Instance(threading._Event)
except AttributeError: # ...until Python 3.3, when it's a class.
save_flag = Instance(threading.Event)
# Private interface
# Variables used to store the three last inputs from the user. On each new
# history update, we populate the user's namespace with these, shifted as
# necessary.
_i00 = Unicode(u'')
_i = Unicode(u'')
_ii = Unicode(u'')
_iii = Unicode(u'')
# A regex matching all forms of the exit command, so that we don't store
# them in the history (it's annoying to rewind the first entry and land on
# an exit call).
_exit_re = re.compile(r"(exit|quit)(\s*\(.*\))?$")
def __init__(self, shell=None, config=None, **traits):
"""Create a new history manager associated with a shell instance.
"""
# We need a pointer back to the shell for various tasks.
super(HistoryManager, self).__init__(shell=shell, config=config,
**traits)
self.save_flag = threading.Event()
self.db_input_cache_lock = threading.Lock()
self.db_output_cache_lock = threading.Lock()
if self.enabled and self.hist_file != ':memory:':
self.save_thread = HistorySavingThread(self)
self.save_thread.start()
self.new_session()
def _get_hist_file_name(self, profile=None):
"""Get default history file name based on the Shell's profile.
The profile parameter is ignored, but must exist for compatibility with
the parent class."""
profile_dir = self.shell.profile_dir.location
return os.path.join(profile_dir, 'history.sqlite')
@needs_sqlite
def new_session(self, conn=None):
"""Get a new session number."""
if conn is None:
conn = self.db
with conn:
cur = conn.execute("""INSERT INTO sessions VALUES (NULL, ?, NULL,
NULL, "") """, (datetime.datetime.now(),))
self.session_number = cur.lastrowid
def end_session(self):
"""Close the database session, filling in the end time and line count."""
self.writeout_cache()
with self.db:
self.db.execute("""UPDATE sessions SET end=?, num_cmds=? WHERE
session==?""", (datetime.datetime.now(),
len(self.input_hist_parsed)-1, self.session_number))
self.session_number = 0
def name_session(self, name):
"""Give the current session a name in the history database."""
with self.db:
self.db.execute("UPDATE sessions SET remark=? WHERE session==?",
(name, self.session_number))
def reset(self, new_session=True):
"""Clear the session history, releasing all object references, and
optionally open a new session."""
self.output_hist.clear()
# The directory history can't be completely empty
self.dir_hist[:] = [py3compat.getcwd()]
if new_session:
if self.session_number:
self.end_session()
self.input_hist_parsed[:] = [""]
self.input_hist_raw[:] = [""]
self.new_session()
# ------------------------------
# Methods for retrieving history
# ------------------------------
def get_session_info(self, session=0):
"""Get info about a session.
Parameters
----------
session : int
Session number to retrieve. The current session is 0, and negative
numbers count back from current session, so -1 is the previous session.
Returns
-------
session_id : int
Session ID number
start : datetime
Timestamp for the start of the session.
end : datetime
Timestamp for the end of the session, or None if IPython crashed.
num_cmds : int
Number of commands run, or None if IPython crashed.
remark : unicode
A manually set description.
"""
if session <= 0:
session += self.session_number
return super(HistoryManager, self).get_session_info(session=session)
def _get_range_session(self, start=1, stop=None, raw=True, output=False):
"""Get input and output history from the current session. Called by
get_range, and takes similar parameters."""
input_hist = self.input_hist_raw if raw else self.input_hist_parsed
n = len(input_hist)
if start < 0:
start += n
if not stop or (stop > n):
stop = n
elif stop < 0:
stop += n
for i in range(start, stop):
if output:
line = (input_hist[i], self.output_hist_reprs.get(i))
else:
line = input_hist[i]
yield (0, i, line)
def get_range(self, session=0, start=1, stop=None, raw=True,output=False):
"""Retrieve input by session.
Parameters
----------
session : int
Session number to retrieve. The current session is 0, and negative
numbers count back from current session, so -1 is previous session.
start : int
First line to retrieve.
stop : int
End of line range (excluded from output itself). If None, retrieve
to the end of the session.
raw : bool
If True, return untranslated input
output : bool
If True, attempt to include output. This will be 'real' Python
objects for the current session, or text reprs from previous
sessions if db_log_output was enabled at the time. Where no output
is found, None is used.
Returns
-------
entries
An iterator over the desired lines. Each line is a 3-tuple, either
(session, line, input) if output is False, or
(session, line, (input, output)) if output is True.
"""
if session <= 0:
session += self.session_number
if session==self.session_number: # Current session
return self._get_range_session(start, stop, raw, output)
return super(HistoryManager, self).get_range(session, start, stop, raw,
output)
## ----------------------------
## Methods for storing history:
## ----------------------------
def store_inputs(self, line_num, source, source_raw=None):
"""Store source and raw input in history and create input cache
variables ``_i*``.
Parameters
----------
line_num : int
The prompt number of this input.
source : str
Python input.
source_raw : str, optional
If given, this is the raw input without any IPython transformations
applied to it. If not given, ``source`` is used.
"""
if source_raw is None:
source_raw = source
source = source.rstrip('\n')
source_raw = source_raw.rstrip('\n')
# do not store exit/quit commands
if self._exit_re.match(source_raw.strip()):
return
self.input_hist_parsed.append(source)
self.input_hist_raw.append(source_raw)
with self.db_input_cache_lock:
self.db_input_cache.append((line_num, source, source_raw))
# Trigger to flush cache and write to DB.
if len(self.db_input_cache) >= self.db_cache_size:
self.save_flag.set()
# update the auto _i variables
self._iii = self._ii
self._ii = self._i
self._i = self._i00
self._i00 = source_raw
# hackish access to user namespace to create _i1,_i2... dynamically
new_i = '_i%s' % line_num
to_main = {'_i': self._i,
'_ii': self._ii,
'_iii': self._iii,
new_i : self._i00 }
if self.shell is not None:
self.shell.push(to_main, interactive=False)
def store_output(self, line_num):
"""If database output logging is enabled, this saves all the
outputs from the indicated prompt number to the database. It's
called by run_cell after code has been executed.
Parameters
----------
line_num : int
The line number from which to save outputs
"""
if (not self.db_log_output) or (line_num not in self.output_hist_reprs):
return
output = self.output_hist_reprs[line_num]
with self.db_output_cache_lock:
self.db_output_cache.append((line_num, output))
if self.db_cache_size <= 1:
self.save_flag.set()
def _writeout_input_cache(self, conn):
with conn:
for line in self.db_input_cache:
conn.execute("INSERT INTO history VALUES (?, ?, ?, ?)",
(self.session_number,)+line)
def _writeout_output_cache(self, conn):
with conn:
for line in self.db_output_cache:
conn.execute("INSERT INTO output_history VALUES (?, ?, ?)",
(self.session_number,)+line)
@needs_sqlite
def writeout_cache(self, conn=None):
"""Write any entries in the cache to the database."""
if conn is None:
conn = self.db
with self.db_input_cache_lock:
try:
self._writeout_input_cache(conn)
except sqlite3.IntegrityError:
self.new_session(conn)
print("ERROR! Session/line number was not unique in",
"database. History logging moved to new session",
self.session_number)
try:
# Try writing to the new session. If this fails, don't
# recurse
self._writeout_input_cache(conn)
except sqlite3.IntegrityError:
pass
finally:
self.db_input_cache = []
with self.db_output_cache_lock:
try:
self._writeout_output_cache(conn)
except sqlite3.IntegrityError:
print("!! Session/line number for output was not unique",
"in database. Output will not be stored.")
finally:
self.db_output_cache = []
class HistorySavingThread(threading.Thread):
"""This thread takes care of writing history to the database, so that
the UI isn't held up while that happens.
It waits for the HistoryManager's save_flag to be set, then writes out
the history cache. The main thread is responsible for setting the flag when
the cache size reaches a defined threshold."""
daemon = True
stop_now = False
enabled = True
def __init__(self, history_manager):
super(HistorySavingThread, self).__init__(name="IPythonHistorySavingThread")
self.history_manager = history_manager
self.enabled = history_manager.enabled
atexit.register(self.stop)
@needs_sqlite
def run(self):
# We need a separate db connection per thread:
try:
self.db = sqlite3.connect(self.history_manager.hist_file,
**self.history_manager.connection_options
)
while True:
self.history_manager.save_flag.wait()
if self.stop_now:
return
self.history_manager.save_flag.clear()
self.history_manager.writeout_cache(self.db)
except Exception as e:
print(("The history saving thread hit an unexpected error (%s)."
"History will not be written to the database.") % repr(e))
def stop(self):
"""This can be called from the main thread to safely stop this thread.
Note that it does not attempt to write out remaining history before
exiting. That should be done by calling the HistoryManager's
end_session method."""
self.stop_now = True
self.history_manager.save_flag.set()
self.join()
# To match, e.g. ~5/8-~2/3
range_re = re.compile(r"""
((?P<startsess>~?\d+)/)?
(?P<start>\d+)?
((?P<sep>[\-:])
((?P<endsess>~?\d+)/)?
(?P<end>\d+))?
$""", re.VERBOSE)
def extract_hist_ranges(ranges_str):
"""Turn a string of history ranges into 3-tuples of (session, start, stop).
Examples
--------
>>> list(extract_hist_ranges("~8/5-~7/4 2"))
[(-8, 5, None), (-7, 1, 5), (0, 2, 3)]
"""
for range_str in ranges_str.split():
rmatch = range_re.match(range_str)
if not rmatch:
continue
start = rmatch.group("start")
if start:
start = int(start)
end = rmatch.group("end")
# If no end specified, get (a, a + 1)
end = int(end) if end else start + 1
else: # start not specified
if not rmatch.group('startsess'): # no startsess
continue
start = 1
end = None # provide the entire session hist
if rmatch.group("sep") == "-": # 1-3 == 1:4 --> [1, 2, 3]
end += 1
startsess = rmatch.group("startsess") or "0"
endsess = rmatch.group("endsess") or startsess
startsess = int(startsess.replace("~","-"))
endsess = int(endsess.replace("~","-"))
assert endsess >= startsess, "start session must be earlier than end session"
if endsess == startsess:
yield (startsess, start, end)
continue
# Multiple sessions in one range:
yield (startsess, start, None)
for sess in range(startsess+1, endsess):
yield (sess, 1, None)
yield (endsess, 1, end)
def _format_lineno(session, line):
"""Helper function to format line numbers properly."""
if session == 0:
return str(line)
return "%s#%s" % (session, line) | unknown | codeparrot/codeparrot-clean | ||
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Defines trials for parameter exploration."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import time
from REDACTED.tensorflow_models.mlperf.models.rough.transformer_lingvo.lingvo.core import hyperparams
class Trial(object):
"""Base class for a trial."""
@classmethod
def Params(cls):
"""Default parameters for a trial."""
p = hyperparams.Params()
p.Define(
'report_interval_seconds', 600,
'Interval between reporting trial results and checking for early '
'stopping.')
p.Define('vizier_objective_metric_key', 'loss',
'Which eval metric to use as the "objective value" for tuning.')
p.Define(
'report_during_training', False,
'Whether to report objective metrics during the training process.')
return p
def __init__(self, params):
self._params = params.Copy()
self._next_report_time = time.time()
@property
def report_interval_seconds(self):
return self._params.report_interval_seconds
@property
def objective_metric_key(self):
return self._params.vizier_objective_metric_key
def Name(self):
raise NotImplementedError('Abstract method')
def OverrideModelParams(self, model_params):
"""Modifies `model_params` according to trial params.
Through this method a `Trial` may tweak model hyperparams (e.g., learning
rate, shape, depth, or width of networks).
Args:
model_params: the original model hyperparams.
Returns:
The modified `model_params`.
"""
raise NotImplementedError('Abstract method')
def ShouldStop(self):
"""Returns whether the trial should stop."""
raise NotImplementedError('Abstract method')
def ReportDone(self, infeasible=False, infeasible_reason=''):
"""Report that the trial is completed."""
raise NotImplementedError('Abstract method')
def ShouldStopAndMaybeReport(self, global_step, metrics_dict):
"""Returns whether the trial should stop.
Args:
global_step: The global step counter.
metrics_dict: If not None, contains the metric should be
reported. If None, do nothing but returns whether the
trial should stop.
"""
if not metrics_dict or not self._params.report_during_training:
return self.ShouldStop()
if time.time() < self._next_report_time:
return False
self._next_report_time = time.time() + self.report_interval_seconds
return self._DoReportTrainingProgress(global_step, metrics_dict)
def _DoReportTrainingProgress(self, global_step, metrics_dict):
raise NotImplementedError('Abstract method')
def ReportEvalMeasure(self, global_step, metrics_dict, checkpoint_path):
"""Reports eval measurement and returns whether the trial should stop."""
raise NotImplementedError('Abstract method')
class NoOpTrial(Trial):
"""A Trial implementation that does nothing."""
def __init__(self):
super(NoOpTrial, self).__init__(Trial.Params())
def Name(self):
return ''
def OverrideModelParams(self, model_params):
return model_params
def ShouldStop(self):
return False
def ReportDone(self, infeasible=False, infeasible_reason=''):
return False
def ShouldStopAndMaybeReport(self, global_step, metrics_dict):
del global_step, metrics_dict # Unused
return False
def ReportEvalMeasure(self, global_step, metrics_dict, checkpoint_path):
del global_step, metrics_dict, checkpoint_path # Unused
return False | unknown | codeparrot/codeparrot-clean | ||
// This file was automatically generated from flow.md by Knit tool. Do not edit.
package kotlinx.coroutines.guide.exampleFlow23
import kotlinx.coroutines.*
import kotlinx.coroutines.flow.*
fun requestFlow(i: Int): Flow<String> = flow {
emit("$i: First")
delay(500) // wait 500 ms
emit("$i: Second")
}
fun main() = runBlocking<Unit> {
val startTime = currentTimeMillis() // remember the start time
(1..3).asFlow().onEach { delay(100) } // emit a number every 100 ms
.flatMapConcat { requestFlow(it) }
.collect { value -> // collect and print
println("$value at ${currentTimeMillis() - startTime} ms from start")
}
} | kotlin | github | https://github.com/Kotlin/kotlinx.coroutines | kotlinx-coroutines-core/jvm/test/guide/example-flow-23.kt |
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
from openerp.tools.translate import _
#in this file, we mostly add the tag translate=True on existing fields that we now want to be translated
class account_account_template(osv.osv):
_inherit = 'account.account.template'
_columns = {
'name': fields.char('Name', size=128, required=True, select=True, translate=True),
}
class account_account(osv.osv):
_inherit = 'account.account'
_columns = {
'name': fields.char('Name', size=128, required=True, select=True, translate=True),
}
class account_tax(osv.osv):
_inherit = 'account.tax'
_columns = {
'name': fields.char('Tax Name', size=128, required=True, select=True, translate=True),
}
class account_tax_template(osv.osv):
_inherit = 'account.tax.template'
_columns = {
'name': fields.char('Tax Name', size=128, required=True, select=True, translate=True),
}
class account_tax_code_template(osv.osv):
_inherit = 'account.tax.code.template'
_columns = {
'name': fields.char('Tax Case Name', size=64, required=True, translate=True),
}
class account_chart_template(osv.osv):
_inherit = 'account.chart.template'
_columns={
'name': fields.char('Name', size=64, required=True, translate=True),
'spoken_languages': fields.char('Spoken Languages', size=64, help="State here the languages for which the translations of templates could be loaded at the time of installation of this localization module and copied in the final object when generating them from templates. You must provide the language codes separated by ';'"),
}
_order = 'name'
class account_fiscal_position(osv.osv):
_inherit = 'account.fiscal.position'
_columns = {
'name': fields.char('Fiscal Position', size=64, required=True, translate=True),
}
class account_fiscal_position_template(osv.osv):
_inherit = 'account.fiscal.position.template'
_columns = {
'name': fields.char('Fiscal Position Template', size=64, required=True, translate=True),
}
class account_journal(osv.osv):
_inherit = 'account.journal'
_columns = {
'name': fields.char('Journal Name', size=64, required=True, translate=True),
}
class account_analytic_account(osv.osv):
_inherit = 'account.analytic.account'
_columns = {
'name': fields.char('Account Name', size=128, required=True, translate=True),
}
class account_analytic_journal(osv.osv):
_inherit = 'account.analytic.journal'
_columns = {
'name': fields.char('Journal Name', size=64, required=True, translate=True),
} | unknown | codeparrot/codeparrot-clean | ||
# Copyright 2014 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.api.compute import base
from tempest import config
CONF = config.CONF
class NetworksTest(base.BaseComputeAdminTest):
_api_version = 2
"""
Tests Nova Networks API that usually requires admin privileges.
API docs:
http://developer.openstack.org/api-ref-compute-v2-ext.html#ext-os-networks
"""
@classmethod
def resource_setup(cls):
super(NetworksTest, cls).resource_setup()
cls.client = cls.os_adm.networks_client
def test_get_network(self):
resp, networks = self.client.list_networks()
configured_network = [x for x in networks if x['label'] ==
CONF.compute.fixed_network_name]
self.assertEqual(1, len(configured_network),
"{0} networks with label {1}".format(
len(configured_network),
CONF.compute.fixed_network_name))
configured_network = configured_network[0]
_, network = self.client.get_network(configured_network['id'])
self.assertEqual(configured_network['label'], network['label'])
def test_list_all_networks(self):
_, networks = self.client.list_networks()
# Check the configured network is in the list
configured_network = CONF.compute.fixed_network_name
self.assertIn(configured_network, [x['label'] for x in networks]) | unknown | codeparrot/codeparrot-clean | ||
#!/usr/bin/env python
# Write a program to solve a Sudoku puzzle by filling the empty cells.
# Empty cells are indicated by the character '.'.
# You may assume that there will be only one unique solution.
# A sudoku puzzle...
# 13974865.
# 7........
# .2.1.9...
# ..7...24.
# .64.1.59.
# .98...3..
# ...8.3.2.
# ........6
# ...2759..
# ...and its solution numbers marked in red.
# this is a brute-force solution; it works but is too slow
class Solution(object):
def solveSudoku(self, board):
self.helper(board)
def helper(self, board):
for i, row in enumerate(board):
for j, c in enumerate(row):
if c == '.':
row_taken = set([board[i][ii] for ii in xrange(9)])
col_taken = set([board[jj][j] for jj in xrange(9)])
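# values already used in the 3x3 box containing (i, j); i/3*3 floors to the box's top-left corner (Python 2 integer division)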
sq_taken = set([board[rr][cc] for rr in xrange(i/3*3, i/3*3+3) for cc in xrange(j/3*3, j/3*3+3)])
taken = row_taken | col_taken | sq_taken
ms = [str(x) for x in xrange(1, 10) if str(x) not in taken] # possible moves
for move in ms:
board[i][j] = move
if self.helper(board): return True # if it works
board[i][j] = '.'
return False # if no possible moves work, return false
return True # if no empty slots, return True
s = Solution()
board = ["..9748...","7........",".2.1.9...","..7...24.",".64.1.59.",".98...3..","...8.3.2.","........6","...2759.."]
board = [list(row) for row in board]
s.solveSudoku(board)
print(board) | unknown | codeparrot/codeparrot-clean | ||
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2017, Ansible by Red Hat, inc
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'network'}
DOCUMENTATION = """
---
module: junos_system
version_added: "2.4"
author: "Ganesh Nalawade (@ganeshrn)"
short_description: Manage the system attributes on Juniper JUNOS devices
description:
- This module provides declarative management of node system attributes
on Juniper JUNOS devices. It provides an option to configure host system
parameters or remove those parameters from the device active
configuration.
options:
hostname:
description:
- Configure the device hostname parameter. This option takes an ASCII string value.
domain_name:
description:
- Configure the IP domain name
on the remote device to the provided value. Value
should be in the dotted name form and will be
appended to the C(hostname) to create a fully-qualified
domain name.
domain_search:
description:
- Provides the list of domain suffixes to
append to the hostname for the purpose of doing name resolution.
This argument accepts a list of names and will be reconciled
with the current active configuration on the running node.
name_servers:
description:
- List of DNS name servers by IP address to use to perform name resolution
lookups. This argument accepts either a list of DNS servers See
examples.
state:
description:
- State of the configuration
values in the device's current active configuration. When set
to I(present), the values should be configured in the device active
configuration and when set to I(absent) the values should not be
in the device active configuration
default: present
choices: ['present', 'absent']
active:
description:
- Specifies whether or not the configuration is active or deactivated
default: True
type: bool
requirements:
- ncclient (>=v0.5.2)
notes:
- This module requires the netconf system service be enabled on
the remote device being managed.
- Tested against vSRX JUNOS version 15.1X49-D15.4, vqfx-10000 JUNOS Version 15.1X53-D60.4.
- Recommended connection is C(netconf). See L(the Junos OS Platform Options,../network/user_guide/platform_junos.html).
- This module also works with C(local) connections for legacy playbooks.
extends_documentation_fragment: junos
"""
EXAMPLES = """
- name: configure hostname and domain name
junos_system:
hostname: junos01
domain_name: test.example.com
domain_search:
- ansible.com
- redhat.com
- juniper.com
- name: remove configuration
junos_system:
state: absent
- name: configure name servers
junos_system:
name_servers:
- 8.8.8.8
- 8.8.4.4
"""
RETURN = """
diff.prepared:
description: Configuration difference before and after applying change.
returned: when configuration is changed and diff option is enabled.
type: str
sample: >
[edit system]
+ host-name test;
+ domain-name ansible.com;
+ domain-search redhat.com;
[edit system name-server]
172.26.1.1 { ... }
+ 8.8.8.8;
"""
import collections
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.network.junos.junos import junos_argument_spec, tostring
from ansible.module_utils.network.junos.junos import load_config, map_params_to_obj, map_obj_to_ele
from ansible.module_utils.network.junos.junos import commit_configuration, discard_changes, locked_config
USE_PERSISTENT_CONNECTION = True
def validate_param_values(module, obj):
for key in obj:
# validate the param value (if validator func exists)
validator = globals().get('validate_%s' % key)
if callable(validator):
validator(module.params.get(key), module)
def main():
""" main entry point for module execution
"""
argument_spec = dict(
hostname=dict(),
domain_name=dict(),
domain_search=dict(type='list'),
name_servers=dict(type='list'),
state=dict(choices=['present', 'absent'], default='present'),
active=dict(default=True, type='bool')
)
argument_spec.update(junos_argument_spec)
params = ['hostname', 'domain_name', 'domain_search', 'name_servers']
required_if = [('state', 'present', params, True),
('state', 'absent', params, True),
('state', 'active', params, True),
('state', 'suspend', params, True)]
module = AnsibleModule(argument_spec=argument_spec,
required_if=required_if,
supports_check_mode=True)
warnings = list()
result = {'changed': False}
if warnings:
result['warnings'] = warnings
top = 'system'
param_to_xpath_map = collections.OrderedDict()
param_to_xpath_map.update([
('hostname', {'xpath': 'host-name', 'leaf_only': True}),
('domain_name', {'xpath': 'domain-name', 'leaf_only': True}),
('domain_search', {'xpath': 'domain-search', 'leaf_only': True, 'value_req': True}),
('name_servers', {'xpath': 'name-server/name', 'is_key': True})
])
validate_param_values(module, param_to_xpath_map)
want = map_params_to_obj(module, param_to_xpath_map)
ele = map_obj_to_ele(module, want, top)
with locked_config(module):
diff = load_config(module, tostring(ele), warnings, action='merge')
commit = not module.check_mode
if diff:
if commit:
commit_configuration(module)
else:
discard_changes(module)
result['changed'] = True
if module._diff:
result['diff'] = {'prepared': diff}
module.exit_json(**result)
if __name__ == "__main__":
main() | unknown | codeparrot/codeparrot-clean | ||
// RUN: clang-reorder-fields -record-name Foo -fields-order c,e1,e3,e2,a,b %s -- | FileCheck %s
class Foo {
int a; // Trailing comment for a.
int b; // Multiline
// trailing for b.
// Prefix comments for c.
int c;
/*c-like*/ int e1;
int /*c-like*/ e2;
int e3 /*c-like*/;
};
// Note: the position of the empty line is somewhat arbitrary.
// CHECK: // Prefix comments for c.
// CHECK-NEXT: int c;
// CHECK-NEXT: /*c-like*/ int e1;
// CHECK-NEXT: int e3 /*c-like*/;
// CHECK-EMPTY:
// CHECK-NEXT: int /*c-like*/ e2;
// CHECK-NEXT: int a; // Trailing comment for a.
// CHECK-NEXT: int b; // Multiline
// CHECK-NEXT: // trailing for b. | cpp | github | https://github.com/llvm/llvm-project | clang-tools-extra/test/clang-reorder-fields/Comments.cpp |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from datetime import timedelta
from tornado import gen, locks
from tornado.gen import TimeoutError
from tornado.testing import gen_test, AsyncTestCase
from tornado.test.util import unittest
class ConditionTest(AsyncTestCase):
def setUp(self):
super(ConditionTest, self).setUp()
self.history = []
def record_done(self, future, key):
"""Record the resolution of a Future returned by Condition.wait."""
def callback(_):
if not future.result():
# wait() resolved to False, meaning it timed out.
self.history.append('timeout')
else:
self.history.append(key)
future.add_done_callback(callback)
def test_repr(self):
c = locks.Condition()
self.assertIn('Condition', repr(c))
self.assertNotIn('waiters', repr(c))
c.wait()
self.assertIn('waiters', repr(c))
@gen_test
def test_notify(self):
c = locks.Condition()
self.io_loop.call_later(0.01, c.notify)
yield c.wait()
def test_notify_1(self):
c = locks.Condition()
self.record_done(c.wait(), 'wait1')
self.record_done(c.wait(), 'wait2')
c.notify(1)
self.history.append('notify1')
c.notify(1)
self.history.append('notify2')
self.assertEqual(['wait1', 'notify1', 'wait2', 'notify2'],
self.history)
def test_notify_n(self):
c = locks.Condition()
for i in range(6):
self.record_done(c.wait(), i)
c.notify(3)
# Callbacks execute in the order they were registered.
self.assertEqual(list(range(3)), self.history)
c.notify(1)
self.assertEqual(list(range(4)), self.history)
c.notify(2)
self.assertEqual(list(range(6)), self.history)
def test_notify_all(self):
c = locks.Condition()
for i in range(4):
self.record_done(c.wait(), i)
c.notify_all()
self.history.append('notify_all')
# Callbacks execute in the order they were registered.
self.assertEqual(
list(range(4)) + ['notify_all'],
self.history)
@gen_test
def test_wait_timeout(self):
c = locks.Condition()
wait = c.wait(timedelta(seconds=0.01))
self.io_loop.call_later(0.02, c.notify) # Too late.
yield gen.sleep(0.03)
self.assertFalse((yield wait))
@gen_test
def test_wait_timeout_preempted(self):
c = locks.Condition()
# This fires before the wait times out.
self.io_loop.call_later(0.01, c.notify)
wait = c.wait(timedelta(seconds=0.02))
yield gen.sleep(0.03)
yield wait # No TimeoutError.
@gen_test
def test_notify_n_with_timeout(self):
# Register callbacks 0, 1, 2, and 3. Callback 1 has a timeout.
# Wait for that timeout to expire, then do notify(2) and make
# sure everyone runs. Verifies that a timed-out callback does
# not count against the 'n' argument to notify().
c = locks.Condition()
self.record_done(c.wait(), 0)
self.record_done(c.wait(timedelta(seconds=0.01)), 1)
self.record_done(c.wait(), 2)
self.record_done(c.wait(), 3)
# Wait for callback 1 to time out.
yield gen.sleep(0.02)
self.assertEqual(['timeout'], self.history)
c.notify(2)
yield gen.sleep(0.01)
self.assertEqual(['timeout', 0, 2], self.history)
c.notify()
self.assertEqual(['timeout', 0, 2, 3], self.history)
@gen_test
def test_notify_all_with_timeout(self):
c = locks.Condition()
self.record_done(c.wait(), 0)
self.record_done(c.wait(timedelta(seconds=0.01)), 1)
self.record_done(c.wait(), 2)
# Wait for callback 1 to time out.
yield gen.sleep(0.02)
self.assertEqual(['timeout'], self.history)
c.notify_all()
self.assertEqual(['timeout', 0, 2], self.history)
@gen_test
def test_nested_notify(self):
# Ensure no notifications lost, even if notify() is reentered by a
# waiter calling notify().
c = locks.Condition()
# Three waiters.
futures = [c.wait() for _ in range(3)]
# First and second futures resolved. Second future reenters notify(),
# resolving third future.
futures[1].add_done_callback(lambda _: c.notify())
c.notify(2)
self.assertTrue(all(f.done() for f in futures))
@gen_test
def test_garbage_collection(self):
# Test that timed-out waiters are occasionally cleaned from the queue.
c = locks.Condition()
for _ in range(101):
c.wait(timedelta(seconds=0.01))
future = c.wait()
self.assertEqual(102, len(c._waiters))
# Let first 101 waiters time out, triggering a collection.
yield gen.sleep(0.02)
self.assertEqual(1, len(c._waiters))
# Final waiter is still active.
self.assertFalse(future.done())
c.notify()
self.assertTrue(future.done())
class EventTest(AsyncTestCase):
def test_repr(self):
event = locks.Event()
self.assertTrue('clear' in str(event))
self.assertFalse('set' in str(event))
event.set()
self.assertFalse('clear' in str(event))
self.assertTrue('set' in str(event))
def test_event(self):
e = locks.Event()
future_0 = e.wait()
e.set()
future_1 = e.wait()
e.clear()
future_2 = e.wait()
self.assertTrue(future_0.done())
self.assertTrue(future_1.done())
self.assertFalse(future_2.done())
@gen_test
def test_event_timeout(self):
e = locks.Event()
with self.assertRaises(TimeoutError):
yield e.wait(timedelta(seconds=0.01))
# After a timed-out waiter, normal operation works.
self.io_loop.add_timeout(timedelta(seconds=0.01), e.set)
yield e.wait(timedelta(seconds=1))
def test_event_set_multiple(self):
e = locks.Event()
e.set()
e.set()
self.assertTrue(e.is_set())
def test_event_wait_clear(self):
e = locks.Event()
f0 = e.wait()
e.clear()
f1 = e.wait()
e.set()
self.assertTrue(f0.done())
self.assertTrue(f1.done())
class SemaphoreTest(AsyncTestCase):
def test_negative_value(self):
self.assertRaises(ValueError, locks.Semaphore, value=-1)
def test_repr(self):
sem = locks.Semaphore()
self.assertIn('Semaphore', repr(sem))
self.assertIn('unlocked,value:1', repr(sem))
sem.acquire()
self.assertIn('locked', repr(sem))
self.assertNotIn('waiters', repr(sem))
sem.acquire()
self.assertIn('waiters', repr(sem))
def test_acquire(self):
sem = locks.Semaphore()
f0 = sem.acquire()
self.assertTrue(f0.done())
# Wait for release().
f1 = sem.acquire()
self.assertFalse(f1.done())
f2 = sem.acquire()
sem.release()
self.assertTrue(f1.done())
self.assertFalse(f2.done())
sem.release()
self.assertTrue(f2.done())
sem.release()
# Now acquire() is instant.
self.assertTrue(sem.acquire().done())
self.assertEqual(0, len(sem._waiters))
@gen_test
def test_acquire_timeout(self):
sem = locks.Semaphore(2)
yield sem.acquire()
yield sem.acquire()
acquire = sem.acquire(timedelta(seconds=0.01))
self.io_loop.call_later(0.02, sem.release) # Too late.
yield gen.sleep(0.3)
with self.assertRaises(gen.TimeoutError):
yield acquire
sem.acquire()
f = sem.acquire()
self.assertFalse(f.done())
sem.release()
self.assertTrue(f.done())
@gen_test
def test_acquire_timeout_preempted(self):
sem = locks.Semaphore(1)
yield sem.acquire()
# This fires before the wait times out.
self.io_loop.call_later(0.01, sem.release)
acquire = sem.acquire(timedelta(seconds=0.02))
yield gen.sleep(0.03)
yield acquire # No TimeoutError.
def test_release_unacquired(self):
# Unbounded releases are allowed, and increment the semaphore's value.
sem = locks.Semaphore()
sem.release()
sem.release()
# Now the counter is 3. We can acquire three times before blocking.
self.assertTrue(sem.acquire().done())
self.assertTrue(sem.acquire().done())
self.assertTrue(sem.acquire().done())
self.assertFalse(sem.acquire().done())
@gen_test
def test_garbage_collection(self):
# Test that timed-out waiters are occasionally cleaned from the queue.
sem = locks.Semaphore(value=0)
futures = [sem.acquire(timedelta(seconds=0.01)) for _ in range(101)]
future = sem.acquire()
self.assertEqual(102, len(sem._waiters))
# Let first 101 waiters time out, triggering a collection.
yield gen.sleep(0.02)
self.assertEqual(1, len(sem._waiters))
# Final waiter is still active.
self.assertFalse(future.done())
sem.release()
self.assertTrue(future.done())
# Prevent "Future exception was never retrieved" messages.
for future in futures:
self.assertRaises(TimeoutError, future.result)
class SemaphoreContextManagerTest(AsyncTestCase):
@gen_test
def test_context_manager(self):
sem = locks.Semaphore()
with (yield sem.acquire()) as yielded:
self.assertTrue(yielded is None)
# Semaphore was released and can be acquired again.
self.assertTrue(sem.acquire().done())
@gen_test
def test_context_manager_exception(self):
sem = locks.Semaphore()
with self.assertRaises(ZeroDivisionError):
with (yield sem.acquire()):
1 / 0
# Semaphore was released and can be acquired again.
self.assertTrue(sem.acquire().done())
@gen_test
def test_context_manager_timeout(self):
sem = locks.Semaphore()
with (yield sem.acquire(timedelta(seconds=0.01))):
pass
# Semaphore was released and can be acquired again.
self.assertTrue(sem.acquire().done())
@gen_test
def test_context_manager_timeout_error(self):
sem = locks.Semaphore(value=0)
with self.assertRaises(gen.TimeoutError):
with (yield sem.acquire(timedelta(seconds=0.01))):
pass
# Counter is still 0.
self.assertFalse(sem.acquire().done())
@gen_test
def test_context_manager_contended(self):
sem = locks.Semaphore()
history = []
@gen.coroutine
def f(index):
with (yield sem.acquire()):
history.append('acquired %d' % index)
yield gen.sleep(0.01)
history.append('release %d' % index)
yield [f(i) for i in range(2)]
expected_history = []
for i in range(2):
expected_history.extend(['acquired %d' % i, 'release %d' % i])
self.assertEqual(expected_history, history)
@gen_test
def test_yield_sem(self):
# Ensure we catch a "with (yield sem)", which should be
# "with (yield sem.acquire())".
with self.assertRaises(gen.BadYieldError):
with (yield locks.Semaphore()):
pass
def test_context_manager_misuse(self):
# Ensure we catch a "with sem", which should be
# "with (yield sem.acquire())".
with self.assertRaises(RuntimeError):
with locks.Semaphore():
pass
class BoundedSemaphoreTest(AsyncTestCase):
def test_release_unacquired(self):
sem = locks.BoundedSemaphore()
self.assertRaises(ValueError, sem.release)
# Value is 0.
sem.acquire()
# Block on acquire().
future = sem.acquire()
self.assertFalse(future.done())
sem.release()
self.assertTrue(future.done())
# Value is 1.
sem.release()
self.assertRaises(ValueError, sem.release)
class LockTests(AsyncTestCase):
def test_repr(self):
lock = locks.Lock()
# No errors.
repr(lock)
lock.acquire()
repr(lock)
def test_acquire_release(self):
lock = locks.Lock()
self.assertTrue(lock.acquire().done())
future = lock.acquire()
self.assertFalse(future.done())
lock.release()
self.assertTrue(future.done())
@gen_test
def test_acquire_fifo(self):
lock = locks.Lock()
self.assertTrue(lock.acquire().done())
N = 5
history = []
@gen.coroutine
def f(idx):
with (yield lock.acquire()):
history.append(idx)
futures = [f(i) for i in range(N)]
self.assertFalse(any(future.done() for future in futures))
lock.release()
yield futures
self.assertEqual(list(range(N)), history)
@gen_test
def test_acquire_timeout(self):
lock = locks.Lock()
lock.acquire()
with self.assertRaises(gen.TimeoutError):
yield lock.acquire(timeout=timedelta(seconds=0.01))
# Still locked.
self.assertFalse(lock.acquire().done())
def test_multi_release(self):
lock = locks.Lock()
self.assertRaises(RuntimeError, lock.release)
lock.acquire()
lock.release()
self.assertRaises(RuntimeError, lock.release)
@gen_test
def test_yield_lock(self):
# Ensure we catch a "with (yield lock)", which should be
# "with (yield lock.acquire())".
with self.assertRaises(gen.BadYieldError):
with (yield locks.Lock()):
pass
def test_context_manager_misuse(self):
# Ensure we catch a "with lock", which should be
# "with (yield lock.acquire())".
with self.assertRaises(RuntimeError):
with locks.Lock():
pass
if __name__ == '__main__':
unittest.main() | unknown | codeparrot/codeparrot-clean | ||
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Asynchronous Compression operations
*
* Copyright (c) 2016, Intel Corporation
* Authors: Weigang Li <weigang.li@intel.com>
* Giovanni Cabiddu <giovanni.cabiddu@intel.com>
*/
#include <crypto/internal/acompress.h>
#include <crypto/scatterwalk.h>
#include <linux/cryptouser.h>
#include <linux/cpumask.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/scatterlist.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/smp.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/workqueue.h>
#include <net/netlink.h>
#include "compress.h"
struct crypto_scomp;
enum {
ACOMP_WALK_SLEEP = 1 << 0,
ACOMP_WALK_SRC_LINEAR = 1 << 1,
ACOMP_WALK_DST_LINEAR = 1 << 2,
};
static const struct crypto_type crypto_acomp_type;
static void acomp_reqchain_done(void *data, int err);
static inline struct acomp_alg *__crypto_acomp_alg(struct crypto_alg *alg)
{
return container_of(alg, struct acomp_alg, calg.base);
}
static inline struct acomp_alg *crypto_acomp_alg(struct crypto_acomp *tfm)
{
return __crypto_acomp_alg(crypto_acomp_tfm(tfm)->__crt_alg);
}
static int __maybe_unused crypto_acomp_report(
struct sk_buff *skb, struct crypto_alg *alg)
{
struct crypto_report_acomp racomp;
memset(&racomp, 0, sizeof(racomp));
strscpy(racomp.type, "acomp", sizeof(racomp.type));
return nla_put(skb, CRYPTOCFGA_REPORT_ACOMP, sizeof(racomp), &racomp);
}
static void __maybe_unused crypto_acomp_show(struct seq_file *m,
struct crypto_alg *alg)
{
seq_puts(m, "type : acomp\n");
}
static void crypto_acomp_exit_tfm(struct crypto_tfm *tfm)
{
struct crypto_acomp *acomp = __crypto_acomp_tfm(tfm);
struct acomp_alg *alg = crypto_acomp_alg(acomp);
if (alg->exit)
alg->exit(acomp);
if (acomp_is_async(acomp))
crypto_free_acomp(crypto_acomp_fb(acomp));
}
static int crypto_acomp_init_tfm(struct crypto_tfm *tfm)
{
struct crypto_acomp *acomp = __crypto_acomp_tfm(tfm);
struct acomp_alg *alg = crypto_acomp_alg(acomp);
struct crypto_acomp *fb = NULL;
int err;
if (tfm->__crt_alg->cra_type != &crypto_acomp_type)
return crypto_init_scomp_ops_async(tfm);
if (acomp_is_async(acomp)) {
fb = crypto_alloc_acomp(crypto_acomp_alg_name(acomp), 0,
CRYPTO_ALG_ASYNC);
if (IS_ERR(fb))
return PTR_ERR(fb);
err = -EINVAL;
if (crypto_acomp_reqsize(fb) > MAX_SYNC_COMP_REQSIZE)
goto out_free_fb;
tfm->fb = crypto_acomp_tfm(fb);
}
acomp->compress = alg->compress;
acomp->decompress = alg->decompress;
acomp->reqsize = alg->base.cra_reqsize;
acomp->base.exit = crypto_acomp_exit_tfm;
if (!alg->init)
return 0;
err = alg->init(acomp);
if (err)
goto out_free_fb;
return 0;
out_free_fb:
crypto_free_acomp(fb);
return err;
}
static unsigned int crypto_acomp_extsize(struct crypto_alg *alg)
{
int extsize = crypto_alg_extsize(alg);
if (alg->cra_type != &crypto_acomp_type)
extsize += sizeof(struct crypto_scomp *);
return extsize;
}
static const struct crypto_type crypto_acomp_type = {
.extsize = crypto_acomp_extsize,
.init_tfm = crypto_acomp_init_tfm,
#ifdef CONFIG_PROC_FS
.show = crypto_acomp_show,
#endif
#if IS_ENABLED(CONFIG_CRYPTO_USER)
.report = crypto_acomp_report,
#endif
.maskclear = ~CRYPTO_ALG_TYPE_MASK,
.maskset = CRYPTO_ALG_TYPE_ACOMPRESS_MASK,
.type = CRYPTO_ALG_TYPE_ACOMPRESS,
.tfmsize = offsetof(struct crypto_acomp, base),
.algsize = offsetof(struct acomp_alg, base),
};
struct crypto_acomp *crypto_alloc_acomp(const char *alg_name, u32 type,
u32 mask)
{
return crypto_alloc_tfm(alg_name, &crypto_acomp_type, type, mask);
}
EXPORT_SYMBOL_GPL(crypto_alloc_acomp);
struct crypto_acomp *crypto_alloc_acomp_node(const char *alg_name, u32 type,
u32 mask, int node)
{
return crypto_alloc_tfm_node(alg_name, &crypto_acomp_type, type, mask,
node);
}
EXPORT_SYMBOL_GPL(crypto_alloc_acomp_node);
static void acomp_save_req(struct acomp_req *req, crypto_completion_t cplt)
{
struct acomp_req_chain *state = &req->chain;
state->compl = req->base.complete;
state->data = req->base.data;
req->base.complete = cplt;
req->base.data = state;
}
static void acomp_restore_req(struct acomp_req *req)
{
struct acomp_req_chain *state = req->base.data;
req->base.complete = state->compl;
req->base.data = state->data;
}
static void acomp_reqchain_virt(struct acomp_req *req)
{
struct acomp_req_chain *state = &req->chain;
unsigned int slen = req->slen;
unsigned int dlen = req->dlen;
if (state->flags & CRYPTO_ACOMP_REQ_SRC_VIRT)
acomp_request_set_src_dma(req, state->src, slen);
if (state->flags & CRYPTO_ACOMP_REQ_DST_VIRT)
acomp_request_set_dst_dma(req, state->dst, dlen);
}
static void acomp_virt_to_sg(struct acomp_req *req)
{
struct acomp_req_chain *state = &req->chain;
state->flags = req->base.flags & (CRYPTO_ACOMP_REQ_SRC_VIRT |
CRYPTO_ACOMP_REQ_DST_VIRT);
if (acomp_request_src_isvirt(req)) {
unsigned int slen = req->slen;
const u8 *svirt = req->svirt;
state->src = svirt;
sg_init_one(&state->ssg, svirt, slen);
acomp_request_set_src_sg(req, &state->ssg, slen);
}
if (acomp_request_dst_isvirt(req)) {
unsigned int dlen = req->dlen;
u8 *dvirt = req->dvirt;
state->dst = dvirt;
sg_init_one(&state->dsg, dvirt, dlen);
acomp_request_set_dst_sg(req, &state->dsg, dlen);
}
}
static int acomp_do_nondma(struct acomp_req *req, bool comp)
{
ACOMP_FBREQ_ON_STACK(fbreq, req);
int err;
if (comp)
err = crypto_acomp_compress(fbreq);
else
err = crypto_acomp_decompress(fbreq);
req->dlen = fbreq->dlen;
return err;
}
static int acomp_do_one_req(struct acomp_req *req, bool comp)
{
if (acomp_request_isnondma(req))
return acomp_do_nondma(req, comp);
acomp_virt_to_sg(req);
return comp ? crypto_acomp_reqtfm(req)->compress(req) :
crypto_acomp_reqtfm(req)->decompress(req);
}
static int acomp_reqchain_finish(struct acomp_req *req, int err)
{
acomp_reqchain_virt(req);
acomp_restore_req(req);
return err;
}
static void acomp_reqchain_done(void *data, int err)
{
struct acomp_req *req = data;
crypto_completion_t compl;
compl = req->chain.compl;
data = req->chain.data;
if (err == -EINPROGRESS)
goto notify;
err = acomp_reqchain_finish(req, err);
notify:
compl(data, err);
}
static int acomp_do_req_chain(struct acomp_req *req, bool comp)
{
int err;
acomp_save_req(req, acomp_reqchain_done);
err = acomp_do_one_req(req, comp);
if (err == -EBUSY || err == -EINPROGRESS)
return err;
return acomp_reqchain_finish(req, err);
}
int crypto_acomp_compress(struct acomp_req *req)
{
struct crypto_acomp *tfm = crypto_acomp_reqtfm(req);
if (acomp_req_on_stack(req) && acomp_is_async(tfm))
return -EAGAIN;
if (crypto_acomp_req_virt(tfm) || acomp_request_issg(req))
return crypto_acomp_reqtfm(req)->compress(req);
return acomp_do_req_chain(req, true);
}
EXPORT_SYMBOL_GPL(crypto_acomp_compress);
int crypto_acomp_decompress(struct acomp_req *req)
{
struct crypto_acomp *tfm = crypto_acomp_reqtfm(req);
if (acomp_req_on_stack(req) && acomp_is_async(tfm))
return -EAGAIN;
if (crypto_acomp_req_virt(tfm) || acomp_request_issg(req))
return crypto_acomp_reqtfm(req)->decompress(req);
return acomp_do_req_chain(req, false);
}
EXPORT_SYMBOL_GPL(crypto_acomp_decompress);
void comp_prepare_alg(struct comp_alg_common *alg)
{
struct crypto_alg *base = &alg->base;
base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK;
}
int crypto_register_acomp(struct acomp_alg *alg)
{
struct crypto_alg *base = &alg->calg.base;
comp_prepare_alg(&alg->calg);
base->cra_type = &crypto_acomp_type;
base->cra_flags |= CRYPTO_ALG_TYPE_ACOMPRESS;
return crypto_register_alg(base);
}
EXPORT_SYMBOL_GPL(crypto_register_acomp);
void crypto_unregister_acomp(struct acomp_alg *alg)
{
crypto_unregister_alg(&alg->base);
}
EXPORT_SYMBOL_GPL(crypto_unregister_acomp);
int crypto_register_acomps(struct acomp_alg *algs, int count)
{
int i, ret;
for (i = 0; i < count; i++) {
ret = crypto_register_acomp(&algs[i]);
if (ret) {
crypto_unregister_acomps(algs, i);
return ret;
}
}
return 0;
}
EXPORT_SYMBOL_GPL(crypto_register_acomps);
void crypto_unregister_acomps(struct acomp_alg *algs, int count)
{
int i;
for (i = count - 1; i >= 0; --i)
crypto_unregister_acomp(&algs[i]);
}
EXPORT_SYMBOL_GPL(crypto_unregister_acomps);
static void acomp_stream_workfn(struct work_struct *work)
{
struct crypto_acomp_streams *s =
container_of(work, struct crypto_acomp_streams, stream_work);
struct crypto_acomp_stream __percpu *streams = s->streams;
int cpu;
for_each_cpu(cpu, &s->stream_want) {
struct crypto_acomp_stream *ps;
void *ctx;
ps = per_cpu_ptr(streams, cpu);
if (ps->ctx)
continue;
ctx = s->alloc_ctx();
if (IS_ERR(ctx))
break;
spin_lock_bh(&ps->lock);
ps->ctx = ctx;
spin_unlock_bh(&ps->lock);
cpumask_clear_cpu(cpu, &s->stream_want);
}
}
void crypto_acomp_free_streams(struct crypto_acomp_streams *s)
{
struct crypto_acomp_stream __percpu *streams = s->streams;
void (*free_ctx)(void *);
int i;
s->streams = NULL;
if (!streams)
return;
cancel_work_sync(&s->stream_work);
free_ctx = s->free_ctx;
for_each_possible_cpu(i) {
struct crypto_acomp_stream *ps = per_cpu_ptr(streams, i);
if (!ps->ctx)
continue;
free_ctx(ps->ctx);
}
free_percpu(streams);
}
EXPORT_SYMBOL_GPL(crypto_acomp_free_streams);
int crypto_acomp_alloc_streams(struct crypto_acomp_streams *s)
{
struct crypto_acomp_stream __percpu *streams;
struct crypto_acomp_stream *ps;
unsigned int i;
void *ctx;
if (s->streams)
return 0;
streams = alloc_percpu(struct crypto_acomp_stream);
if (!streams)
return -ENOMEM;
ctx = s->alloc_ctx();
if (IS_ERR(ctx)) {
free_percpu(streams);
return PTR_ERR(ctx);
}
i = cpumask_first(cpu_possible_mask);
ps = per_cpu_ptr(streams, i);
ps->ctx = ctx;
for_each_possible_cpu(i) {
ps = per_cpu_ptr(streams, i);
spin_lock_init(&ps->lock);
}
s->streams = streams;
INIT_WORK(&s->stream_work, acomp_stream_workfn);
return 0;
}
EXPORT_SYMBOL_GPL(crypto_acomp_alloc_streams);
struct crypto_acomp_stream *_crypto_acomp_lock_stream_bh(
struct crypto_acomp_streams *s)
{
struct crypto_acomp_stream __percpu *streams = s->streams;
int cpu = raw_smp_processor_id();
struct crypto_acomp_stream *ps;
ps = per_cpu_ptr(streams, cpu);
spin_lock_bh(&ps->lock);
if (likely(ps->ctx))
return ps;
spin_unlock(&ps->lock);
cpumask_set_cpu(cpu, &s->stream_want);
schedule_work(&s->stream_work);
ps = per_cpu_ptr(streams, cpumask_first(cpu_possible_mask));
spin_lock(&ps->lock);
return ps;
}
EXPORT_SYMBOL_GPL(_crypto_acomp_lock_stream_bh);
void acomp_walk_done_src(struct acomp_walk *walk, int used)
{
walk->slen -= used;
if ((walk->flags & ACOMP_WALK_SRC_LINEAR))
scatterwalk_advance(&walk->in, used);
else
scatterwalk_done_src(&walk->in, used);
if ((walk->flags & ACOMP_WALK_SLEEP))
cond_resched();
}
EXPORT_SYMBOL_GPL(acomp_walk_done_src);
void acomp_walk_done_dst(struct acomp_walk *walk, int used)
{
walk->dlen -= used;
if ((walk->flags & ACOMP_WALK_DST_LINEAR))
scatterwalk_advance(&walk->out, used);
else
scatterwalk_done_dst(&walk->out, used);
if ((walk->flags & ACOMP_WALK_SLEEP))
cond_resched();
}
EXPORT_SYMBOL_GPL(acomp_walk_done_dst);
int acomp_walk_next_src(struct acomp_walk *walk)
{
unsigned int slen = walk->slen;
unsigned int max = UINT_MAX;
if (!preempt_model_preemptible() && (walk->flags & ACOMP_WALK_SLEEP))
max = PAGE_SIZE;
if ((walk->flags & ACOMP_WALK_SRC_LINEAR)) {
walk->in.__addr = (void *)(((u8 *)walk->in.sg) +
walk->in.offset);
return min(slen, max);
}
return slen ? scatterwalk_next(&walk->in, slen) : 0;
}
EXPORT_SYMBOL_GPL(acomp_walk_next_src);
int acomp_walk_next_dst(struct acomp_walk *walk)
{
unsigned int dlen = walk->dlen;
unsigned int max = UINT_MAX;
if (!preempt_model_preemptible() && (walk->flags & ACOMP_WALK_SLEEP))
max = PAGE_SIZE;
if ((walk->flags & ACOMP_WALK_DST_LINEAR)) {
walk->out.__addr = (void *)(((u8 *)walk->out.sg) +
walk->out.offset);
return min(dlen, max);
}
return dlen ? scatterwalk_next(&walk->out, dlen) : 0;
}
EXPORT_SYMBOL_GPL(acomp_walk_next_dst);
int acomp_walk_virt(struct acomp_walk *__restrict walk,
struct acomp_req *__restrict req, bool atomic)
{
struct scatterlist *src = req->src;
struct scatterlist *dst = req->dst;
walk->slen = req->slen;
walk->dlen = req->dlen;
if (!walk->slen || !walk->dlen)
return -EINVAL;
walk->flags = 0;
if ((req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) && !atomic)
walk->flags |= ACOMP_WALK_SLEEP;
if ((req->base.flags & CRYPTO_ACOMP_REQ_SRC_VIRT))
walk->flags |= ACOMP_WALK_SRC_LINEAR;
if ((req->base.flags & CRYPTO_ACOMP_REQ_DST_VIRT))
walk->flags |= ACOMP_WALK_DST_LINEAR;
if ((walk->flags & ACOMP_WALK_SRC_LINEAR)) {
walk->in.sg = (void *)req->svirt;
walk->in.offset = 0;
} else
scatterwalk_start(&walk->in, src);
if ((walk->flags & ACOMP_WALK_DST_LINEAR)) {
walk->out.sg = (void *)req->dvirt;
walk->out.offset = 0;
} else
scatterwalk_start(&walk->out, dst);
return 0;
}
EXPORT_SYMBOL_GPL(acomp_walk_virt);
struct acomp_req *acomp_request_clone(struct acomp_req *req,
size_t total, gfp_t gfp)
{
struct acomp_req *nreq;
nreq = container_of(crypto_request_clone(&req->base, total, gfp),
struct acomp_req, base);
if (nreq == req)
return req;
if (req->src == &req->chain.ssg)
nreq->src = &nreq->chain.ssg;
if (req->dst == &req->chain.dsg)
nreq->dst = &nreq->chain.dsg;
return nreq;
}
EXPORT_SYMBOL_GPL(acomp_request_clone);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Asynchronous compression type"); | c | github | https://github.com/torvalds/linux | crypto/acompress.c |
# use this to debug issues
#- debug: msg={{ is_private ~ ', ' ~ is_default ~ ', ' ~ privacy|default('nope')}}
- hosts: localhost
name: test global privacy setting
gather_facts: false
roles:
- a
pre_tasks:
- name: 'test roles: privacy'
assert:
that:
- is_private and privacy is undefined or not is_private and privacy is defined
- not is_default or is_default and privacy is defined
- hosts: localhost
name: test import_role privacy
gather_facts: false
tasks:
- import_role: name=a
- name: role is private, var should be undefined
assert:
that:
- is_private and privacy is undefined or not is_private and privacy is defined
- not is_default or is_default and privacy is defined
- hosts: localhost
name: test public no always overrides global on import_role
gather_facts: false
tasks:
- import_role: name=a public=no
- name: role is private, var should be undefined
assert:
that:
- privacy is undefined
- hosts: localhost
name: test public yes always overrides global on import_role
gather_facts: false
tasks:
- import_role: name=a public=yes
- name: role is public, var should be defined
assert:
that:
- privacy is defined
- hosts: localhost
name: test global privacy setting on includes
gather_facts: false
tasks:
- include_role: name=a
- name: test include_role privacy
assert:
that:
- not is_default and (is_private and privacy is undefined or not is_private and privacy is defined) or is_default and privacy is undefined
- hosts: localhost
name: test public yes always overrides global privacy setting on includes
gather_facts: false
tasks:
- include_role: name=a public=yes
- name: test include_role privacy
assert:
that:
- privacy is defined
- hosts: localhost
name: test public no always overrides global privacy setting on includes
gather_facts: false
tasks:
- include_role: name=a public=no
- name: test include_role privacy
assert:
that:
- privacy is undefined | unknown | github | https://github.com/ansible/ansible | test/integration/targets/roles/privacy.yml |
# (c) Copyright 2013 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Swaminathan Vasudevan, Hewlett Packard.
import sys
from neutronclient.neutron.v2_0.vpn import ipsecpolicy
from neutronclient.tests.unit import test_cli20
class CLITestV20VpnIpsecPolicyJSON(test_cli20.CLITestV20Base):
def test_create_ipsecpolicy_all_params(self):
"""vpn-ipsecpolicy-create all params with dashes."""
resource = 'ipsecpolicy'
cmd = ipsecpolicy.CreateIPsecPolicy(test_cli20.MyApp(sys.stdout), None)
name = 'ipsecpolicy1'
description = 'first-ipsecpolicy1'
auth_algorithm = 'sha1'
encryption_algorithm = 'aes-256'
encapsulation_mode = 'tunnel'
pfs = 'group5'
transform_protocol = 'ah'
tenant_id = 'my-tenant'
my_id = 'my-id'
lifetime = 'units=seconds,value=20000'
args = [name,
'--description', description,
'--tenant-id', tenant_id,
'--auth-algorithm', auth_algorithm,
'--encryption-algorithm', encryption_algorithm,
'--transform-protocol', transform_protocol,
'--encapsulation-mode', encapsulation_mode,
'--lifetime', lifetime,
'--pfs', pfs]
position_names = ['name', 'auth_algorithm', 'encryption_algorithm',
'encapsulation_mode', 'description',
'transform_protocol', 'pfs',
'tenant_id']
position_values = [name, auth_algorithm, encryption_algorithm,
encapsulation_mode, description,
transform_protocol, pfs,
tenant_id]
extra_body = {
'lifetime': {
'units': 'seconds',
'value': 20000,
},
}
self._test_create_resource(resource, cmd, name, my_id, args,
position_names, position_values,
extra_body=extra_body)
def test_create_ipsecpolicy_with_limited_params(self):
"""vpn-ipsecpolicy-create with limited params."""
resource = 'ipsecpolicy'
cmd = ipsecpolicy.CreateIPsecPolicy(test_cli20.MyApp(sys.stdout), None)
name = 'ipsecpolicy1'
auth_algorithm = 'sha1'
encryption_algorithm = 'aes-128'
encapsulation_mode = 'tunnel'
pfs = 'group5'
transform_protocol = 'esp'
tenant_id = 'my-tenant'
my_id = 'my-id'
args = [name,
'--tenant-id', tenant_id]
position_names = ['name', 'auth_algorithm', 'encryption_algorithm',
'encapsulation_mode',
'transform_protocol', 'pfs',
'tenant_id']
position_values = [name, auth_algorithm, encryption_algorithm,
encapsulation_mode,
transform_protocol, pfs,
tenant_id]
self._test_create_resource(resource, cmd, name, my_id, args,
position_names, position_values)
def _test_lifetime_values(self, lifetime):
resource = 'ipsecpolicy'
cmd = ipsecpolicy.CreateIPsecPolicy(test_cli20.MyApp(sys.stdout), None)
name = 'ipsecpolicy1'
description = 'my-ipsec-policy'
auth_algorithm = 'sha1'
encryption_algorithm = 'aes-256'
ike_version = 'v1'
phase1_negotiation_mode = 'main'
pfs = 'group5'
tenant_id = 'my-tenant'
my_id = 'my-id'
args = [name,
'--description', description,
'--tenant-id', tenant_id,
'--auth-algorithm', auth_algorithm,
'--encryption-algorithm', encryption_algorithm,
'--ike-version', ike_version,
'--phase1-negotiation-mode', phase1_negotiation_mode,
'--lifetime', lifetime,
'--pfs', pfs]
position_names = ['name', 'description',
'auth_algorithm', 'encryption_algorithm',
'phase1_negotiation_mode',
'ike_version', 'pfs',
'tenant_id']
position_values = [name, description,
auth_algorithm, encryption_algorithm,
phase1_negotiation_mode, ike_version, pfs,
tenant_id]
try:
self._test_create_resource(resource, cmd, name, my_id, args,
position_names, position_values)
except Exception:
return
self.fail("IPsecPolicy Lifetime Error")
def test_create_ipsecpolicy_with_invalid_lifetime_keys(self):
lifetime = 'uts=seconds,val=20000'
self._test_lifetime_values(lifetime)
def test_create_ipsecpolicy_with_invalid_lifetime_values(self):
lifetime = 'units=minutes,value=0'
self._test_lifetime_values(lifetime)
def test_list_ipsecpolicy(self):
"""vpn-ipsecpolicy-list."""
resources = "ipsecpolicies"
cmd = ipsecpolicy.ListIPsecPolicy(test_cli20.MyApp(sys.stdout), None)
self._test_list_resources(resources, cmd, True)
def test_list_ipsecpolicy_pagination(self):
"""vpn-ipsecpolicy-list."""
resources = "ipsecpolicies"
cmd = ipsecpolicy.ListIPsecPolicy(test_cli20.MyApp(sys.stdout), None)
self._test_list_resources_with_pagination(resources, cmd)
def test_list_ipsecpolicy_sort(self):
"""vpn-ipsecpolicy-list --sort-key name --sort-key id --sort-key asc
--sort-key desc
"""
resources = "ipsecpolicies"
cmd = ipsecpolicy.ListIPsecPolicy(test_cli20.MyApp(sys.stdout), None)
self._test_list_resources(resources, cmd,
sort_key=["name", "id"],
sort_dir=["asc", "desc"])
def test_list_ipsecpolicy_limit(self):
"""vpn-ipsecpolicy-list -P."""
resources = "ipsecpolicies"
cmd = ipsecpolicy.ListIPsecPolicy(test_cli20.MyApp(sys.stdout), None)
self._test_list_resources(resources, cmd, page_size=1000)
def test_show_ipsecpolicy_id(self):
"""vpn-ipsecpolicy-show ipsecpolicy_id."""
resource = 'ipsecpolicy'
cmd = ipsecpolicy.ShowIPsecPolicy(test_cli20.MyApp(sys.stdout), None)
args = ['--fields', 'id', self.test_id]
self._test_show_resource(resource, cmd, self.test_id, args, ['id'])
def test_show_ipsecpolicy_id_name(self):
"""vpn-ipsecpolicy-show."""
resource = 'ipsecpolicy'
cmd = ipsecpolicy.ShowIPsecPolicy(test_cli20.MyApp(sys.stdout), None)
args = ['--fields', 'id', '--fields', 'name', self.test_id]
self._test_show_resource(resource, cmd, self.test_id,
args, ['id', 'name'])
def test_update_ipsecpolicy(self):
"""vpn-ipsecpolicy-update myid --name newname --tags a b."""
resource = 'ipsecpolicy'
cmd = ipsecpolicy.UpdateIPsecPolicy(test_cli20.MyApp(sys.stdout), None)
self._test_update_resource(resource, cmd, 'myid',
['myid', '--name', 'newname'],
{'name': 'newname', })
def test_delete_ipsecpolicy(self):
"""vpn-ipsecpolicy-delete my-id."""
resource = 'ipsecpolicy'
cmd = ipsecpolicy.DeleteIPsecPolicy(test_cli20.MyApp(sys.stdout), None)
my_id = 'my-id'
args = [my_id]
self._test_delete_resource(resource, cmd, my_id, args)
class CLITestV20VpnIpsecPolicyXML(CLITestV20VpnIpsecPolicyJSON):
format = 'xml' | unknown | codeparrot/codeparrot-clean | ||
import distutils, os
from setuptools import Command
from distutils.util import convert_path
from distutils import log
from distutils.errors import *
from setuptools.command.setopt import edit_config, option_base, config_file
def shquote(arg):
"""Quote an argument for later parsing by shlex.split()"""
for c in '"', "'", "\\", "#":
if c in arg: return repr(arg)
if arg.split() != [arg]:
return repr(arg)
return arg
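# Illustrative note, not part of the original module: shquote() passes a simple
# one-token argument straight through and wraps anything containing whitespace
# or the characters " ' \ # in repr(), e.g. shquote('build') returns 'build'
# while shquote('--formats=gztar zip') returns "'--formats=gztar zip'", so the
# stored alias can later be taken apart again with shlex.split().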
class alias(option_base):
"""Define a shortcut that invokes one or more commands"""
description = "define a shortcut to invoke one or more commands"
command_consumes_arguments = True
user_options = [
('remove', 'r', 'remove (unset) the alias'),
] + option_base.user_options
boolean_options = option_base.boolean_options + ['remove']
def initialize_options(self):
option_base.initialize_options(self)
self.args = None
self.remove = None
def finalize_options(self):
option_base.finalize_options(self)
if self.remove and len(self.args) != 1:
raise DistutilsOptionError(
"Must specify exactly one argument (the alias name) when "
"using --remove"
)
def run(self):
aliases = self.distribution.get_option_dict('aliases')
if not self.args:
print "Command Aliases"
print "---------------"
for alias in aliases:
print "setup.py alias", format_alias(alias, aliases)
return
elif len(self.args)==1:
alias, = self.args
if self.remove:
command = None
elif alias in aliases:
print "setup.py alias", format_alias(alias, aliases)
return
else:
print "No alias definition found for %r" % alias
return
else:
alias = self.args[0]
command = ' '.join(map(shquote,self.args[1:]))
edit_config(self.filename, {'aliases': {alias:command}}, self.dry_run)
def format_alias(name, aliases):
source, command = aliases[name]
if source == config_file('global'):
source = '--global-config '
elif source == config_file('user'):
source = '--user-config '
elif source == config_file('local'):
source = ''
else:
source = '--filename=%r' % source
return source+name+' '+command | unknown | codeparrot/codeparrot-clean | ||
package middleware
import (
"context"
"net/http"
)
// Middleware is an interface to allow the use of ordinary functions as Docker API filters.
// Any struct that has the appropriate signature can be registered as a middleware.
type Middleware interface {
WrapHandler(func(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error) func(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error
} | go | github | https://github.com/moby/moby | daemon/server/middleware/middleware.go |
#!/usr/bin/env python
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Bootstrap script to clone and forward to the recipe engine tool."""
import ast
import logging
import os
import random
import re
import subprocess
import sys
import time
import traceback
BOOTSTRAP_VERSION = 1
# The root of the repository relative to the directory of this file.
REPO_ROOT = ''
# The path of the recipes.cfg file relative to the root of the repository.
RECIPES_CFG = os.path.join('infra', 'config', 'recipes.cfg')
def parse_protobuf(fh):
"""Parse the protobuf text format just well enough to understand recipes.cfg.
We don't use the protobuf library because we want to be as self-contained
as possible in this bootstrap, so it can be simply vendored into a client
repo.
We assume all fields are repeated since we don't have a proto spec to work
with.
Args:
fh: a filehandle containing the text format protobuf.
Returns:
A recursive dictionary of lists.
"""
def parse_atom(text):
if text == 'true': return True
if text == 'false': return False
return ast.literal_eval(text)
ret = {}
for line in fh:
line = line.strip()
m = re.match(r'(\w+)\s*:\s*(.*)', line)
if m:
ret.setdefault(m.group(1), []).append(parse_atom(m.group(2)))
continue
m = re.match(r'(\w+)\s*{', line)
if m:
subparse = parse_protobuf(fh)
ret.setdefault(m.group(1), []).append(subparse)
continue
if line == '}': return ret
if line == '': continue
raise Exception('Could not understand line: <%s>' % line)
return ret
def get_unique(things):
if len(things) == 1:
return things[0]
elif len(things) == 0:
raise ValueError("Expected to get one thing, but dinna get none.")
else:
logging.warn('Expected to get one thing, but got a bunch: %s\n%s' %
(things, traceback.format_stack()))
return things[0]
def main():
if sys.platform.startswith(('win', 'cygwin')):
git = 'git.bat'
else:
git = 'git'
# Find the repository and config file to operate on.
repo_root = os.path.abspath(
os.path.join(os.path.dirname(__file__), REPO_ROOT))
recipes_cfg_path = os.path.join(repo_root, RECIPES_CFG)
with open(recipes_cfg_path, 'rU') as fh:
protobuf = parse_protobuf(fh)
engine_buf = get_unique([
b for b in protobuf['deps'] if b.get('project_id') == ['recipe_engine'] ])
engine_url = get_unique(engine_buf['url'])
engine_revision = get_unique(engine_buf['revision'])
engine_subpath = (get_unique(engine_buf.get('path_override', ['']))
.replace('/', os.path.sep))
recipes_path = os.path.join(repo_root,
get_unique(protobuf['recipes_path']).replace('/', os.path.sep))
deps_path = os.path.join(recipes_path, '.recipe_deps')
engine_path = os.path.join(deps_path, 'recipe_engine')
# Ensure that we have the recipe engine cloned.
def ensure_engine():
if not os.path.exists(deps_path):
os.makedirs(deps_path)
if not os.path.exists(engine_path):
subprocess.check_call([git, 'clone', engine_url, engine_path])
needs_fetch = subprocess.call(
[git, 'rev-parse', '--verify', '%s^{commit}' % engine_revision],
cwd=engine_path, stdout=open(os.devnull, 'w'))
if needs_fetch:
subprocess.check_call([git, 'fetch'], cwd=engine_path)
subprocess.check_call(
[git, 'checkout', '--quiet', engine_revision], cwd=engine_path)
try:
ensure_engine()
except subprocess.CalledProcessError as e:
if e.returncode == 128: # Thrown when git gets a lock error.
time.sleep(random.uniform(2,5))
ensure_engine()
else:
raise
args = ['--package', recipes_cfg_path,
'--bootstrap-script', __file__] + sys.argv[1:]
return subprocess.call([
sys.executable, '-u',
os.path.join(engine_path, engine_subpath, 'recipes.py')] + args)
if __name__ == '__main__':
sys.exit(main()) | unknown | codeparrot/codeparrot-clean | ||
{
"css": null,
"js": [],
"start": 0,
"end": 204,
"type": "Root",
"fragment": {
"type": "Fragment",
"nodes": [
{
"type": "RegularElement",
"start": 0,
"end": 20,
"name": "div",
"name_loc": {
"start": {
"line": 1,
"column": 1,
"character": 1
},
"end": {
"line": 1,
"column": 4,
"character": 4
}
},
"attributes": [],
"fragment": {
"type": "Fragment",
"nodes": [
{
"type": "Text",
"start": 5,
"end": 7,
"raw": "\n\t",
"data": "\n\t"
},
{
"type": "Component",
"start": 7,
"end": 14,
"name": "Comp",
"name_loc": {
"start": {
"line": 2,
"column": 2,
"character": 8
},
"end": {
"line": 2,
"column": 6,
"character": 12
}
},
"attributes": [],
"fragment": {
"type": "Fragment",
"nodes": [
{
"type": "Text",
"start": 13,
"end": 14,
"raw": "\n",
"data": "\n"
}
]
}
}
]
}
},
{
"type": "Text",
"start": 20,
"end": 22,
"raw": "\n\n",
"data": "\n\n"
},
{
"type": "RegularElement",
"start": 22,
"end": 51,
"name": "div",
"name_loc": {
"start": {
"line": 5,
"column": 1,
"character": 23
},
"end": {
"line": 5,
"column": 4,
"character": 26
}
},
"attributes": [],
"fragment": {
"type": "Fragment",
"nodes": [
{
"type": "Text",
"start": 27,
"end": 29,
"raw": "\n\t",
"data": "\n\t"
},
{
"type": "Component",
"start": 29,
"end": 45,
"name": "Comp",
"name_loc": {
"start": {
"line": 6,
"column": 2,
"character": 30
},
"end": {
"line": 6,
"column": 6,
"character": 34
}
},
"attributes": [
{
"type": "Attribute",
"start": 35,
"end": 44,
"name": "foo",
"name_loc": {
"start": {
"line": 6,
"column": 7,
"character": 35
},
"end": {
"line": 6,
"column": 10,
"character": 38
}
},
"value": {
"type": "ExpressionTag",
"start": 39,
"end": 44,
"expression": {
"type": "Identifier",
"start": 40,
"end": 43,
"loc": {
"start": {
"line": 6,
"column": 12
},
"end": {
"line": 6,
"column": 15
}
},
"name": "bar"
}
}
}
],
"fragment": {
"type": "Fragment",
"nodes": []
}
}
]
}
},
{
"type": "Text",
"start": 51,
"end": 53,
"raw": "\n\n",
"data": "\n\n"
},
{
"type": "RegularElement",
"start": 53,
"end": 72,
"name": "div",
"name_loc": {
"start": {
"line": 9,
"column": 1,
"character": 54
},
"end": {
"line": 9,
"column": 4,
"character": 57
}
},
"attributes": [],
"fragment": {
"type": "Fragment",
"nodes": [
{
"type": "Text",
"start": 58,
"end": 60,
"raw": "\n\t",
"data": "\n\t"
},
{
"type": "RegularElement",
"start": 60,
"end": 66,
"name": "span",
"name_loc": {
"start": {
"line": 10,
"column": 2,
"character": 61
},
"end": {
"line": 10,
"column": 6,
"character": 65
}
},
"attributes": [],
"fragment": {
"type": "Fragment",
"nodes": []
}
}
]
}
},
{
"type": "Text",
"start": 72,
"end": 74,
"raw": "\n\n",
"data": "\n\n"
},
{
"type": "RegularElement",
"start": 74,
"end": 94,
"name": "div",
"name_loc": {
"start": {
"line": 13,
"column": 1,
"character": 75
},
"end": {
"line": 13,
"column": 4,
"character": 78
}
},
"attributes": [],
"fragment": {
"type": "Fragment",
"nodes": [
{
"type": "Text",
"start": 79,
"end": 81,
"raw": "\n\t",
"data": "\n\t"
},
{
"type": "Component",
"start": 81,
"end": 88,
"name": "Comp.",
"name_loc": {
"start": {
"line": 14,
"column": 2,
"character": 82
},
"end": {
"line": 14,
"column": 7,
"character": 87
}
},
"attributes": [],
"fragment": {
"type": "Fragment",
"nodes": []
}
}
]
}
},
{
"type": "Text",
"start": 94,
"end": 96,
"raw": "\n\n",
"data": "\n\n"
},
{
"type": "RegularElement",
"start": 96,
"end": 116,
"name": "div",
"name_loc": {
"start": {
"line": 17,
"column": 1,
"character": 97
},
"end": {
"line": 17,
"column": 4,
"character": 100
}
},
"attributes": [],
"fragment": {
"type": "Fragment",
"nodes": [
{
"type": "Text",
"start": 101,
"end": 103,
"raw": "\n\t",
"data": "\n\t"
},
{
"type": "Component",
"start": 103,
"end": 110,
"name": "comp.",
"name_loc": {
"start": {
"line": 18,
"column": 2,
"character": 104
},
"end": {
"line": 18,
"column": 7,
"character": 109
}
},
"attributes": [],
"fragment": {
"type": "Fragment",
"nodes": []
}
}
]
}
},
{
"type": "Text",
"start": 116,
"end": 118,
"raw": "\n\n",
"data": "\n\n"
},
{
"type": "IfBlock",
"elseif": false,
"start": 118,
"end": 140,
"test": {
"type": "Identifier",
"start": 123,
"end": 126,
"loc": {
"start": {
"line": 21,
"column": 5
},
"end": {
"line": 21,
"column": 8
}
},
"name": "foo"
},
"consequent": {
"type": "Fragment",
"nodes": [
{
"type": "Text",
"start": 127,
"end": 129,
"raw": "\n\t",
"data": "\n\t"
},
{
"type": "RegularElement",
"start": 129,
"end": 135,
"name": "div",
"name_loc": {
"start": {
"line": 22,
"column": 2,
"character": 130
},
"end": {
"line": 22,
"column": 5,
"character": 133
}
},
"attributes": [],
"fragment": {
"type": "Fragment",
"nodes": [
{
"type": "Text",
"start": 134,
"end": 135,
"raw": "\n",
"data": "\n"
}
]
}
}
]
},
"alternate": null
},
{
"type": "Text",
"start": 140,
"end": 142,
"raw": "\n\n",
"data": "\n\n"
},
{
"type": "IfBlock",
"elseif": false,
"start": 142,
"end": 174,
"test": {
"type": "Identifier",
"start": 147,
"end": 150,
"loc": {
"start": {
"line": 25,
"column": 5
},
"end": {
"line": 25,
"column": 8
}
},
"name": "foo"
},
"consequent": {
"type": "Fragment",
"nodes": [
{
"type": "Text",
"start": 151,
"end": 153,
"raw": "\n\t",
"data": "\n\t"
},
{
"type": "Component",
"start": 153,
"end": 169,
"name": "Comp",
"name_loc": {
"start": {
"line": 26,
"column": 2,
"character": 154
},
"end": {
"line": 26,
"column": 6,
"character": 158
}
},
"attributes": [
{
"type": "Attribute",
"start": 159,
"end": 168,
"name": "foo",
"name_loc": {
"start": {
"line": 26,
"column": 7,
"character": 159
},
"end": {
"line": 26,
"column": 10,
"character": 162
}
},
"value": {
"type": "ExpressionTag",
"start": 163,
"end": 168,
"expression": {
"type": "Identifier",
"start": 164,
"end": 167,
"loc": {
"start": {
"line": 26,
"column": 12
},
"end": {
"line": 26,
"column": 15
}
},
"name": "bar"
}
}
}
],
"fragment": {
"type": "Fragment",
"nodes": []
}
}
]
},
"alternate": null
},
{
"type": "Text",
"start": 174,
"end": 176,
"raw": "\n\n",
"data": "\n\n"
},
{
"type": "RegularElement",
"start": 176,
"end": 204,
"name": "div",
"name_loc": {
"start": {
"line": 29,
"column": 1,
"character": 177
},
"end": {
"line": 29,
"column": 4,
"character": 180
}
},
"attributes": [],
"fragment": {
"type": "Fragment",
"nodes": [
{
"type": "Text",
"start": 181,
"end": 182,
"raw": "\n",
"data": "\n"
},
{
"type": "RegularElement",
"start": 182,
"end": 191,
"name": "p",
"name_loc": {
"start": {
"line": 30,
"column": 1,
"character": 183
},
"end": {
"line": 30,
"column": 2,
"character": 184
}
},
"attributes": [],
"fragment": {
"type": "Fragment",
"nodes": [
{
"type": "Text",
"start": 185,
"end": 187,
"raw": "hi",
"data": "hi"
}
]
}
},
{
"type": "Text",
"start": 191,
"end": 193,
"raw": "\n\n",
"data": "\n\n"
},
{
"type": "RegularElement",
"start": 193,
"end": 204,
"name": "open-ended",
"name_loc": {
"start": {
"line": 32,
"column": 1,
"character": 194
},
"end": {
"line": 32,
"column": 11,
"character": 204
}
},
"attributes": [],
"fragment": {
"type": "Fragment",
"nodes": []
}
}
]
}
}
]
},
"options": null
} | json | github | https://github.com/sveltejs/svelte | packages/svelte/tests/parser-modern/samples/loose-unclosed-tag/output.json |
"""
Command to manually re-post open ended submissions to the grader.
"""
from django.contrib.auth.models import User
from django.core.management.base import BaseCommand
from optparse import make_option
from xmodule.modulestore.django import modulestore
from opaque_keys.edx.locations import SlashSeparatedCourseKey
from xmodule.open_ended_grading_classes.openendedchild import OpenEndedChild
from xmodule.open_ended_grading_classes.open_ended_module import OpenEndedModule
from courseware.courses import get_course
from instructor.utils import get_module_for_student
class Command(BaseCommand):
"""
Command to manually re-post open ended submissions to the grader.
"""
help = ("Usage: openended_post <course_id> <problem_location> <student_ids.txt> <hostname> --dry-run --task-number=<task_number>\n"
"The text file should contain a User.id in each line.")
option_list = BaseCommand.option_list + (
make_option('-n', '--dry-run',
action='store_true', dest='dry_run', default=False,
help="Do everything except send the submission to the grader. "),
make_option('--task-number',
type='int', default=0,
help="Task number that needs to be submitted."),
)
def handle(self, *args, **options):
dry_run = options['dry_run']
task_number = options['task_number']
if len(args) == 4:
course_id = SlashSeparatedCourseKey.from_deprecated_string(args[0])
location = course_id.make_usage_key_from_deprecated_string(args[1])
students_ids = [line.strip() for line in open(args[2])]
hostname = args[3]
else:
print self.help
return
try:
course = get_course(course_id)
except ValueError as err:
print err
return
descriptor = modulestore().get_item(location, depth=0)
if descriptor is None:
print "Location not found in course"
return
if dry_run:
print "Doing a dry run."
students = User.objects.filter(id__in=students_ids).order_by('username')
print "Number of students: {0}".format(students.count())
for student in students:
post_submission_for_student(student, course, location, task_number, dry_run=dry_run, hostname=hostname)
def post_submission_for_student(student, course, location, task_number, dry_run=True, hostname=None):
"""If the student's task child_state is ASSESSING post submission to grader."""
print "{0}:{1}".format(student.id, student.username)
request = DummyRequest()
request.user = student
request.host = hostname
try:
module = get_module_for_student(student, location, request=request)
if module is None:
print " WARNING: No state found."
return False
latest_task = module.child_module.get_task_number(task_number)
if latest_task is None:
print " WARNING: No task state found."
return False
if not isinstance(latest_task, OpenEndedModule):
print " ERROR: Not an OpenEndedModule task."
return False
latest_task_state = latest_task.child_state
if latest_task_state == OpenEndedChild.INITIAL:
print " WARNING: No submission."
elif latest_task_state == OpenEndedChild.POST_ASSESSMENT or latest_task_state == OpenEndedChild.DONE:
print " WARNING: Submission already graded."
elif latest_task_state == OpenEndedChild.ASSESSING:
latest_answer = latest_task.latest_answer()
if dry_run:
print " Skipped sending submission to grader: {0!r}".format(latest_answer[:100].encode('utf-8'))
else:
latest_task.send_to_grader(latest_answer, latest_task.system)
print " Sent submission to grader: {0!r}".format(latest_answer[:100].encode('utf-8'))
return True
else:
print "WARNING: Invalid task_state: {0}".format(latest_task_state)
except Exception as err: # pylint: disable=broad-except
print err
return False
class DummyRequest(object):
"""Dummy request"""
META = {}
def __init__(self):
self.session = {}
self.user = None
self.host = None
self.secure = True
def get_host(self):
"""Return a default host."""
return self.host
def is_secure(self):
"""Always secure."""
return self.secure | unknown | codeparrot/codeparrot-clean | ||
<?php
/*
* This file is part of the Symfony package.
*
* (c) Fabien Potencier <fabien@symfony.com>
*
* For the full copyright and license information, please view the LICENSE
* file that was distributed with this source code.
*/
namespace Symfony\Bundle\FrameworkBundle\Tests\DependencyInjection\Compiler;
use PHPUnit\Framework\TestCase;
use Symfony\Bundle\FrameworkBundle\DependencyInjection\Compiler\UnusedTagsPass;
use Symfony\Component\DependencyInjection\ContainerBuilder;
class UnusedTagsPassTest extends TestCase
{
public function testProcess()
{
$pass = new UnusedTagsPass();
$container = new ContainerBuilder();
$container->register('foo')
->addTag('kenrel.event_subscriber');
$container->register('bar')
->addTag('kenrel.event_subscriber');
$pass->process($container);
$this->assertSame([\sprintf('%s: Tag "kenrel.event_subscriber" was defined on service(s) "foo", "bar", but was never used. Did you mean "kernel.event_subscriber"?', UnusedTagsPass::class)], $container->getCompiler()->getLog());
}
public function testMissingKnownTags()
{
if (\dirname((new \ReflectionClass(ContainerBuilder::class))->getFileName(), 3) !== \dirname(__DIR__, 5)) {
$this->markTestSkipped('Tests are not run from the root symfony/symfony metapackage.');
}
$this->assertSame(UnusedTagsPassUtils::getDefinedTags(), $this->getKnownTags(), 'The src/Symfony/Bundle/FrameworkBundle/DependencyInjection/Compiler/UnusedTagsPass.php file must be updated; run src/Symfony/Bundle/FrameworkBundle/Resources/bin/check-unused-known-tags.php.');
}
private function getKnownTags(): array
{
$tags = \Closure::bind(
static fn () => UnusedTagsPass::KNOWN_TAGS,
null,
UnusedTagsPass::class
)();
sort($tags);
return $tags;
}
} | php | github | https://github.com/symfony/symfony | src/Symfony/Bundle/FrameworkBundle/Tests/DependencyInjection/Compiler/UnusedTagsPassTest.php |
# -*- coding: utf-8 -*-
# Copyright 2019 Red Hat
# GNU General Public License v3.0+
# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
"""
The exos legacy fact class
In this file the configuration is collected from the device for a given
resource, parsed, and the facts tree is populated based on that
configuration.
"""
from __future__ import absolute_import, division, print_function
__metaclass__ = type
import re
import json
from ansible.module_utils.network.exos.exos import run_commands
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.six import iteritems
class FactsBase(object):
COMMANDS = list()
def __init__(self, module):
self.module = module
self.facts = dict()
self.warnings = list()
self.responses = None
def populate(self):
self.responses = run_commands(self.module, self.COMMANDS)
def run(self, cmd):
return run_commands(self.module, cmd)
class Default(FactsBase):
COMMANDS = [
'show version',
'show switch'
]
def populate(self):
super(Default, self).populate()
data = self.responses[0]
if data:
self.facts['version'] = self.parse_version(data)
self.facts['serialnum'] = self.parse_serialnum(data)
data = self.responses[1]
if data:
self.facts['model'] = self.parse_model(data)
self.facts['hostname'] = self.parse_hostname(data)
def parse_version(self, data):
match = re.search(r'Image\s+: ExtremeXOS version (\S+)', data)
if match:
return match.group(1)
def parse_model(self, data):
match = re.search(r'System Type:\s+(.*$)', data, re.M)
if match:
return match.group(1)
def parse_hostname(self, data):
match = re.search(r'SysName:\s+(\S+)', data, re.M)
if match:
return match.group(1)
def parse_serialnum(self, data):
match = re.search(r'Switch\s+: \S+ (\S+)', data, re.M)
if match:
return match.group(1)
# For stack, return serial number of the first switch in the stack.
match = re.search(r'Slot-\d+\s+: \S+ (\S+)', data, re.M)
if match:
return match.group(1)
# Handle unique formatting for VM
match = re.search(r'Switch\s+: PN:\S+\s+SN:(\S+)', data, re.M)
if match:
return match.group(1)
class Hardware(FactsBase):
COMMANDS = [
'show memory'
]
def populate(self):
super(Hardware, self).populate()
data = self.responses[0]
if data:
self.facts['memtotal_mb'] = int(round(int(self.parse_memtotal(data)) / 1024, 0))
self.facts['memfree_mb'] = int(round(int(self.parse_memfree(data)) / 1024, 0))
def parse_memtotal(self, data):
match = re.search(r' Total DRAM \(KB\): (\d+)', data, re.M)
if match:
return match.group(1)
# Handle unique formatting for VM
match = re.search(r' Total \s+\(KB\): (\d+)', data, re.M)
if match:
return match.group(1)
def parse_memfree(self, data):
match = re.search(r' Free\s+\(KB\): (\d+)', data, re.M)
if match:
return match.group(1)
class Config(FactsBase):
COMMANDS = ['show configuration detail']
def populate(self):
super(Config, self).populate()
data = self.responses[0]
if data:
self.facts['config'] = data
class Interfaces(FactsBase):
COMMANDS = [
'show switch',
{'command': 'show port config', 'output': 'json'},
{'command': 'show port description', 'output': 'json'},
{'command': 'show vlan detail', 'output': 'json'},
{'command': 'show lldp neighbors', 'output': 'json'}
]
def populate(self):
super(Interfaces, self).populate()
self.facts['all_ipv4_addresses'] = list()
self.facts['all_ipv6_addresses'] = list()
data = self.responses[0]
if data:
sysmac = self.parse_sysmac(data)
data = self.responses[1]
if data:
self.facts['interfaces'] = self.populate_interfaces(data, sysmac)
data = self.responses[2]
if data:
self.populate_interface_descriptions(data)
data = self.responses[3]
if data:
self.populate_vlan_interfaces(data, sysmac)
data = self.responses[4]
if data:
self.facts['neighbors'] = self.parse_neighbors(data)
def parse_sysmac(self, data):
match = re.search(r'System MAC:\s+(\S+)', data, re.M)
if match:
return match.group(1)
def populate_interfaces(self, interfaces, sysmac):
facts = dict()
for elem in interfaces:
intf = dict()
if 'show_ports_config' not in elem:
continue
key = str(elem['show_ports_config']['port'])
if elem['show_ports_config']['linkState'] == 2:
# Link state is "not present", don't include
continue
intf['type'] = 'Ethernet'
intf['macaddress'] = sysmac
intf['bandwidth_configured'] = str(elem['show_ports_config']['speedCfg'])
intf['bandwidth'] = str(elem['show_ports_config']['speedActual'])
intf['duplex_configured'] = elem['show_ports_config']['duplexCfg']
intf['duplex'] = elem['show_ports_config']['duplexActual']
if elem['show_ports_config']['linkState'] == 1:
intf['lineprotocol'] = 'up'
else:
intf['lineprotocol'] = 'down'
if elem['show_ports_config']['portState'] == 1:
intf['operstatus'] = 'up'
else:
intf['operstatus'] = 'admin down'
facts[key] = intf
return facts
def populate_interface_descriptions(self, data):
for elem in data:
if 'show_ports_description' not in elem:
continue
key = str(elem['show_ports_description']['port'])
if 'descriptionString' in elem['show_ports_description']:
desc = elem['show_ports_description']['descriptionString']
self.facts['interfaces'][key]['description'] = desc
def populate_vlan_interfaces(self, data, sysmac):
for elem in data:
if 'vlanProc' in elem:
key = elem['vlanProc']['name1']
if key not in self.facts['interfaces']:
intf = dict()
intf['type'] = 'VLAN'
intf['macaddress'] = sysmac
self.facts['interfaces'][key] = intf
if elem['vlanProc']['ipAddress'] != '0.0.0.0':
self.facts['interfaces'][key]['ipv4'] = list()
addr = elem['vlanProc']['ipAddress']
subnet = elem['vlanProc']['maskForDisplay']
ipv4 = dict(address=addr, subnet=subnet)
self.add_ip_address(addr, 'ipv4')
self.facts['interfaces'][key]['ipv4'].append(ipv4)
if 'rtifIpv6Address' in elem:
key = elem['rtifIpv6Address']['rtifName']
if key not in self.facts['interfaces']:
intf = dict()
intf['type'] = 'VLAN'
intf['macaddress'] = sysmac
self.facts['interfaces'][key] = intf
self.facts['interfaces'][key]['ipv6'] = list()
addr, subnet = elem['rtifIpv6Address']['ipv6_address_mask'].split('/')
ipv6 = dict(address=addr, subnet=subnet)
self.add_ip_address(addr, 'ipv6')
self.facts['interfaces'][key]['ipv6'].append(ipv6)
def add_ip_address(self, address, family):
if family == 'ipv4':
if address not in self.facts['all_ipv4_addresses']:
self.facts['all_ipv4_addresses'].append(address)
else:
if address not in self.facts['all_ipv6_addresses']:
self.facts['all_ipv6_addresses'].append(address)
def parse_neighbors(self, data):
facts = dict()
for elem in data:
if 'lldpPortNbrInfoShort' not in elem:
continue
intf = str(elem['lldpPortNbrInfoShort']['port'])
if intf not in facts:
facts[intf] = list()
fact = dict()
fact['host'] = elem['lldpPortNbrInfoShort']['nbrSysName']
fact['port'] = str(elem['lldpPortNbrInfoShort']['nbrPortID'])
facts[intf].append(fact)
return facts | unknown | codeparrot/codeparrot-clean | ||
/*
* Copyright (C) 2009 The Guava Authors
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
* in compliance with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License
* is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
* or implied. See the License for the specific language governing permissions and limitations under
* the License.
*/
package com.google.common.util.concurrent;
import static com.google.common.base.Preconditions.checkArgument;
import static com.google.common.base.Preconditions.checkNotNull;
import static com.google.common.base.Preconditions.checkState;
import static com.google.common.util.concurrent.Platform.restoreInterruptIfIsInterruptedException;
import static com.google.common.util.concurrent.Service.State.FAILED;
import static com.google.common.util.concurrent.Service.State.NEW;
import static com.google.common.util.concurrent.Service.State.RUNNING;
import static com.google.common.util.concurrent.Service.State.STARTING;
import static com.google.common.util.concurrent.Service.State.STOPPING;
import static com.google.common.util.concurrent.Service.State.TERMINATED;
import static java.util.Objects.requireNonNull;
import com.google.common.annotations.GwtIncompatible;
import com.google.common.annotations.J2ktIncompatible;
import com.google.common.util.concurrent.Monitor.Guard;
import com.google.common.util.concurrent.Service.State;
import com.google.errorprone.annotations.CanIgnoreReturnValue;
import com.google.errorprone.annotations.ForOverride;
import com.google.errorprone.annotations.concurrent.GuardedBy;
import com.google.j2objc.annotations.WeakOuter;
import java.time.Duration;
import java.util.concurrent.Executor;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
import org.jspecify.annotations.Nullable;
/**
* Base class for implementing services that can handle {@link #doStart} and {@link #doStop}
* requests, responding to them with {@link #notifyStarted()} and {@link #notifyStopped()}
* callbacks. Its subclasses must manage threads manually; consider {@link
* AbstractExecutionThreadService} if you need only a single execution thread.
*
* @author Jesse Wilson
* @author Luke Sandberg
* @since 1.0
*/
@GwtIncompatible
@J2ktIncompatible
public abstract class AbstractService implements Service {
private static final ListenerCallQueue.Event<Listener> STARTING_EVENT =
new ListenerCallQueue.Event<Listener>() {
@Override
public void call(Listener listener) {
listener.starting();
}
@Override
public String toString() {
return "starting()";
}
};
private static final ListenerCallQueue.Event<Listener> RUNNING_EVENT =
new ListenerCallQueue.Event<Listener>() {
@Override
public void call(Listener listener) {
listener.running();
}
@Override
public String toString() {
return "running()";
}
};
private static final ListenerCallQueue.Event<Listener> STOPPING_FROM_STARTING_EVENT =
stoppingEvent(STARTING);
private static final ListenerCallQueue.Event<Listener> STOPPING_FROM_RUNNING_EVENT =
stoppingEvent(RUNNING);
private static final ListenerCallQueue.Event<Listener> TERMINATED_FROM_NEW_EVENT =
terminatedEvent(NEW);
private static final ListenerCallQueue.Event<Listener> TERMINATED_FROM_STARTING_EVENT =
terminatedEvent(STARTING);
private static final ListenerCallQueue.Event<Listener> TERMINATED_FROM_RUNNING_EVENT =
terminatedEvent(RUNNING);
private static final ListenerCallQueue.Event<Listener> TERMINATED_FROM_STOPPING_EVENT =
terminatedEvent(STOPPING);
private static ListenerCallQueue.Event<Listener> terminatedEvent(State from) {
return new ListenerCallQueue.Event<Listener>() {
@Override
public void call(Listener listener) {
listener.terminated(from);
}
@Override
public String toString() {
return "terminated({from = " + from + "})";
}
};
}
private static ListenerCallQueue.Event<Listener> stoppingEvent(State from) {
return new ListenerCallQueue.Event<Listener>() {
@Override
public void call(Listener listener) {
listener.stopping(from);
}
@Override
public String toString() {
return "stopping({from = " + from + "})";
}
};
}
private final Monitor monitor = new Monitor();
private final Guard isStartable = new IsStartableGuard();
@WeakOuter
private final class IsStartableGuard extends Guard {
IsStartableGuard() {
super(AbstractService.this.monitor);
}
@Override
public boolean isSatisfied() {
return state() == NEW;
}
}
private final Guard isStoppable = new IsStoppableGuard();
@WeakOuter
private final class IsStoppableGuard extends Guard {
IsStoppableGuard() {
super(AbstractService.this.monitor);
}
@Override
public boolean isSatisfied() {
return state().compareTo(RUNNING) <= 0;
}
}
private final Guard hasReachedRunning = new HasReachedRunningGuard();
@WeakOuter
private final class HasReachedRunningGuard extends Guard {
HasReachedRunningGuard() {
super(AbstractService.this.monitor);
}
@Override
public boolean isSatisfied() {
return state().compareTo(RUNNING) >= 0;
}
}
private final Guard isStopped = new IsStoppedGuard();
@WeakOuter
private final class IsStoppedGuard extends Guard {
IsStoppedGuard() {
super(AbstractService.this.monitor);
}
@Override
public boolean isSatisfied() {
return state().compareTo(TERMINATED) >= 0;
}
}
/** The listeners to notify during a state transition. */
private final ListenerCallQueue<Listener> listeners = new ListenerCallQueue<>();
/**
* The current state of the service. This should be written with the lock held but can be read
* without it because it is an immutable object in a volatile field. This is desirable so that
* methods like {@link #state}, {@link #failureCause} and notably {@link #toString} can be run
* without grabbing the lock.
*
* <p>To update this field correctly the lock must be held to guarantee that the state is
* consistent.
*/
private volatile StateSnapshot snapshot = new StateSnapshot(NEW);
/** Constructor for use by subclasses. */
protected AbstractService() {}
/**
* This method is called by {@link #startAsync} to initiate service startup. The invocation of
* this method should cause a call to {@link #notifyStarted()}, either during this method's run,
* or after it has returned. If startup fails, the invocation should cause a call to {@link
* #notifyFailed(Throwable)} instead.
*
* <p>This method should return promptly; prefer to do work on a different thread where it is
* convenient. It is invoked exactly once on service startup, even when {@link #startAsync} is
* called multiple times.
*/
@ForOverride
protected abstract void doStart();
/**
* This method should be used to initiate service shutdown. The invocation of this method should
* cause a call to {@link #notifyStopped()}, either during this method's run, or after it has
* returned. If shutdown fails, the invocation should cause a call to {@link
* #notifyFailed(Throwable)} instead.
*
* <p>This method should return promptly; prefer to do work on a different thread where it is
* convenient. It is invoked exactly once on service shutdown, even when {@link #stopAsync} is
* called multiple times.
*
* <p>If {@link #stopAsync} is called on a {@link State#STARTING} service, this method is not
* invoked immediately. Instead, it will be deferred until after the service is {@link
* State#RUNNING}. Services that need to cancel startup work can override {@link #doCancelStart}.
*/
@ForOverride
protected abstract void doStop();
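  /*
   * A minimal illustrative sketch (SleepingService and its worker thread are hypothetical, not
   * part of this class) of how a subclass pairs doStart()/doStop() with
   * notifyStarted()/notifyStopped(); error handling is omitted.
   *
   *   final class SleepingService extends AbstractService {
   *     private volatile Thread worker;
   *
   *     @Override
   *     protected void doStart() {
   *       worker = new Thread(() -> {
   *         notifyStarted();            // report successful startup from the worker thread
   *         while (!Thread.currentThread().isInterrupted()) {
   *           // ... periodic work ...
   *         }
   *       });
   *       worker.start();
   *     }
   *
   *     @Override
   *     protected void doStop() {
   *       worker.interrupt();           // ask the worker to finish
   *       notifyStopped();              // report successful shutdown
   *     }
   *   }
   */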
/**
* This method is called by {@link #stopAsync} when the service is still starting (i.e. {@link
* #startAsync} has been called but {@link #notifyStarted} has not). Subclasses can override the
* method to cancel pending work and then call {@link #notifyStopped} to stop the service.
*
* <p>This method should return promptly; prefer to do work on a different thread where it is
* convenient. It is invoked exactly once on service shutdown, even when {@link #stopAsync} is
* called multiple times.
*
* <p>When this method is called {@link #state()} will return {@link State#STOPPING}, which is the
* external state observable by the caller of {@link #stopAsync}.
*
* @since 27.0
*/
@ForOverride
protected void doCancelStart() {}
@CanIgnoreReturnValue
@Override
public final Service startAsync() {
if (monitor.enterIf(isStartable)) {
try {
snapshot = new StateSnapshot(STARTING);
enqueueStartingEvent();
doStart();
} catch (Throwable startupFailure) {
restoreInterruptIfIsInterruptedException(startupFailure);
notifyFailed(startupFailure);
} finally {
monitor.leave();
dispatchListenerEvents();
}
} else {
throw new IllegalStateException("Service " + this + " has already been started");
}
return this;
}
@CanIgnoreReturnValue
@Override
public final Service stopAsync() {
if (monitor.enterIf(isStoppable)) {
try {
State previous = state();
switch (previous) {
case NEW:
snapshot = new StateSnapshot(TERMINATED);
enqueueTerminatedEvent(NEW);
break;
case STARTING:
snapshot = new StateSnapshot(STARTING, true, null);
enqueueStoppingEvent(STARTING);
doCancelStart();
break;
case RUNNING:
snapshot = new StateSnapshot(STOPPING);
enqueueStoppingEvent(RUNNING);
doStop();
break;
case STOPPING:
case TERMINATED:
case FAILED:
// These cases are impossible due to the if statement above.
throw new AssertionError("isStoppable is incorrectly implemented, saw: " + previous);
}
} catch (Throwable shutdownFailure) {
restoreInterruptIfIsInterruptedException(shutdownFailure);
notifyFailed(shutdownFailure);
} finally {
monitor.leave();
dispatchListenerEvents();
}
}
return this;
}
@Override
public final void awaitRunning() {
monitor.enterWhenUninterruptibly(hasReachedRunning);
try {
checkCurrentState(RUNNING);
} finally {
monitor.leave();
}
}
/**
* @since 28.0
*/
@Override
public final void awaitRunning(Duration timeout) throws TimeoutException {
Service.super.awaitRunning(timeout);
}
@Override
public final void awaitRunning(long timeout, TimeUnit unit) throws TimeoutException {
if (monitor.enterWhenUninterruptibly(hasReachedRunning, timeout, unit)) {
try {
checkCurrentState(RUNNING);
} finally {
monitor.leave();
}
} else {
// It is possible due to races that we are currently in the expected state even though we
      // timed out. For example, if we weren't even able to grab the lock within the timeout we would never
// even check the guard. I don't think we care too much about this use case but it could lead
// to a confusing error message.
throw new TimeoutException("Timed out waiting for " + this + " to reach the RUNNING state.");
}
}
@Override
public final void awaitTerminated() {
monitor.enterWhenUninterruptibly(isStopped);
try {
checkCurrentState(TERMINATED);
} finally {
monitor.leave();
}
}
/**
* @since 28.0
*/
@Override
public final void awaitTerminated(Duration timeout) throws TimeoutException {
Service.super.awaitTerminated(timeout);
}
@Override
public final void awaitTerminated(long timeout, TimeUnit unit) throws TimeoutException {
if (monitor.enterWhenUninterruptibly(isStopped, timeout, unit)) {
try {
checkCurrentState(TERMINATED);
} finally {
monitor.leave();
}
} else {
// It is possible due to races that we are currently in the expected state even though we
      // timed out. For example, if we weren't even able to grab the lock within the timeout we would never
// even check the guard. I don't think we care too much about this use case but it could lead
// to a confusing error message.
throw new TimeoutException(
"Timed out waiting for "
+ this
+ " to reach a terminal state. "
+ "Current state: "
+ state());
}
}
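  /*
   * A minimal caller-side sketch (MyService is a hypothetical subclass) of the lifecycle driven
   * by the methods above: start the service, block until it is running, use it, then stop it and
   * block until it has terminated.
   *
   *   Service service = new MyService();
   *   service.startAsync().awaitRunning();
   *   // ... use the service ...
   *   service.stopAsync().awaitTerminated();
   */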
/** Checks that the current state is equal to the expected state. */
@GuardedBy("monitor")
private void checkCurrentState(State expected) {
State actual = state();
if (actual != expected) {
if (actual == FAILED) {
// Handle this specially so that we can include the failureCause, if there is one.
throw new IllegalStateException(
"Expected the service " + this + " to be " + expected + ", but the service has FAILED",
failureCause());
}
throw new IllegalStateException(
"Expected the service " + this + " to be " + expected + ", but was " + actual);
}
}
/**
* Implementing classes should invoke this method once their service has started. It will cause
* the service to transition from {@link State#STARTING} to {@link State#RUNNING}.
*
* @throws IllegalStateException if the service is not {@link State#STARTING}.
*/
protected final void notifyStarted() {
monitor.enter();
try {
// We have to examine the internal state of the snapshot here to properly handle the stop
// while starting case.
if (snapshot.state != STARTING) {
IllegalStateException failure =
new IllegalStateException(
"Cannot notifyStarted() when the service is " + snapshot.state);
notifyFailed(failure);
throw failure;
}
if (snapshot.shutdownWhenStartupFinishes) {
snapshot = new StateSnapshot(STOPPING);
// We don't call listeners here because we already did that when we set the
// shutdownWhenStartupFinishes flag.
doStop();
} else {
snapshot = new StateSnapshot(RUNNING);
enqueueRunningEvent();
}
} finally {
monitor.leave();
dispatchListenerEvents();
}
}
/**
   * Implementing classes should invoke this method once their service has stopped. It will cause
   * the service to transition from {@link State#STARTING}, {@link State#RUNNING}, or {@link
   * State#STOPPING} to {@link State#TERMINATED}.
*
* @throws IllegalStateException if the service is not one of {@link State#STOPPING}, {@link
* State#STARTING}, or {@link State#RUNNING}.
*/
protected final void notifyStopped() {
monitor.enter();
try {
State previous = state();
switch (previous) {
case NEW:
case TERMINATED:
case FAILED:
throw new IllegalStateException("Cannot notifyStopped() when the service is " + previous);
case RUNNING:
case STARTING:
case STOPPING:
snapshot = new StateSnapshot(TERMINATED);
enqueueTerminatedEvent(previous);
break;
}
} finally {
monitor.leave();
dispatchListenerEvents();
}
}
/**
   * Invoke this method to transition the service to the {@link State#FAILED} state. The service
   * will <b>not be stopped</b> if it is running. Invoke this method when a service has failed
   * critically or otherwise cannot be started or stopped.
*/
protected final void notifyFailed(Throwable cause) {
checkNotNull(cause);
monitor.enter();
try {
State previous = state();
switch (previous) {
case NEW:
case TERMINATED:
throw new IllegalStateException("Failed while in state:" + previous, cause);
case RUNNING:
case STARTING:
case STOPPING:
snapshot = new StateSnapshot(FAILED, false, cause);
enqueueFailedEvent(previous, cause);
break;
case FAILED:
// Do nothing
break;
}
} finally {
monitor.leave();
dispatchListenerEvents();
}
}
@Override
public final boolean isRunning() {
return state() == RUNNING;
}
@Override
public final State state() {
return snapshot.externalState();
}
/**
* @since 14.0
*/
@Override
public final Throwable failureCause() {
return snapshot.failureCause();
}
/**
* @since 13.0
*/
@Override
public final void addListener(Listener listener, Executor executor) {
listeners.addListener(listener, executor);
}
@Override
public String toString() {
return getClass().getSimpleName() + " [" + state() + "]";
}
/**
* Attempts to execute all the listeners in {@link #listeners} while not holding the {@link
* #monitor}.
*/
private void dispatchListenerEvents() {
if (!monitor.isOccupiedByCurrentThread()) {
listeners.dispatch();
}
}
private void enqueueStartingEvent() {
listeners.enqueue(STARTING_EVENT);
}
private void enqueueRunningEvent() {
listeners.enqueue(RUNNING_EVENT);
}
private void enqueueStoppingEvent(State from) {
if (from == State.STARTING) {
listeners.enqueue(STOPPING_FROM_STARTING_EVENT);
} else if (from == State.RUNNING) {
listeners.enqueue(STOPPING_FROM_RUNNING_EVENT);
} else {
throw new AssertionError();
}
}
private void enqueueTerminatedEvent(State from) {
switch (from) {
case NEW:
listeners.enqueue(TERMINATED_FROM_NEW_EVENT);
break;
case STARTING:
listeners.enqueue(TERMINATED_FROM_STARTING_EVENT);
break;
case RUNNING:
listeners.enqueue(TERMINATED_FROM_RUNNING_EVENT);
break;
case STOPPING:
listeners.enqueue(TERMINATED_FROM_STOPPING_EVENT);
break;
case TERMINATED:
case FAILED:
throw new AssertionError();
}
}
private void enqueueFailedEvent(State from, Throwable cause) {
// can't memoize this one due to the exception
listeners.enqueue(
new ListenerCallQueue.Event<Listener>() {
@Override
public void call(Listener listener) {
listener.failed(from, cause);
}
@Override
public String toString() {
return "failed({from = " + from + ", cause = " + cause + "})";
}
});
}
/**
* An immutable snapshot of the current state of the service. This class represents a consistent
* snapshot of the state and therefore it can be used to answer simple queries without needing to
* grab a lock.
*/
// @Immutable except that Throwable is mutable (initCause(), setStackTrace(), mutable subclasses).
private static final class StateSnapshot {
/**
* The internal state, which equals external state unless shutdownWhenStartupFinishes is true.
*/
final State state;
/** If true, the user requested a shutdown while the service was still starting up. */
final boolean shutdownWhenStartupFinishes;
/**
* The exception that caused this service to fail. This will be {@code null} unless the service
* has failed.
*/
final @Nullable Throwable failure;
StateSnapshot(State internalState) {
this(internalState, false, null);
}
StateSnapshot(
State internalState, boolean shutdownWhenStartupFinishes, @Nullable Throwable failure) {
checkArgument(
!shutdownWhenStartupFinishes || internalState == STARTING,
"shutdownWhenStartupFinishes can only be set if state is STARTING. Got %s instead.",
internalState);
checkArgument(
(failure != null) == (internalState == FAILED),
"A failure cause should be set if and only if the state is failed. Got %s and %s "
+ "instead.",
internalState,
failure);
this.state = internalState;
this.shutdownWhenStartupFinishes = shutdownWhenStartupFinishes;
this.failure = failure;
}
/**
* @see Service#state()
*/
State externalState() {
if (shutdownWhenStartupFinishes && state == STARTING) {
return STOPPING;
} else {
return state;
}
}
/**
* @see Service#failureCause()
*/
Throwable failureCause() {
checkState(
state == FAILED,
"failureCause() is only valid if the service has failed, service is %s",
state);
// requireNonNull is safe because the constructor requires a non-null cause with state=FAILED.
return requireNonNull(failure);
}
}
} | java | github | https://github.com/google/guava | guava/src/com/google/common/util/concurrent/AbstractService.java |
from unittest import TestCase
from django.test import tag
@tag('syntax_error')
class SyntaxErrorTestCase(TestCase):
pass
1syntax_error # NOQA | python | github | https://github.com/django/django | tests/test_runner_apps/tagged/tests_syntax_error.py |
#!/usr/bin/env python
import subprocess
import sys
import css_properties
import in_generator
import license
HEADER_TEMPLATE = """
%(license)s
#ifndef %(class_name)s_h
#define %(class_name)s_h
#include "core/css/parser/CSSParserMode.h"
#include "wtf/HashFunctions.h"
#include "wtf/HashTraits.h"
#include <string.h>
namespace WTF {
class AtomicString;
class String;
}
namespace blink {
enum CSSPropertyID {
CSSPropertyInvalid = 0,
%(property_enums)s
};
const int firstCSSProperty = %(first_property_id)s;
const int numCSSProperties = %(properties_count)s;
const int lastCSSProperty = %(last_property_id)d;
const int lastUnresolvedCSSProperty = %(last_unresolved_property_id)d;
const size_t maxCSSPropertyNameLength = %(max_name_length)d;
const char* getPropertyName(CSSPropertyID);
const WTF::AtomicString& getPropertyNameAtomicString(CSSPropertyID);
WTF::String getPropertyNameString(CSSPropertyID);
WTF::String getJSPropertyName(CSSPropertyID);
inline CSSPropertyID convertToCSSPropertyID(int value)
{
ASSERT((value >= firstCSSProperty && value <= lastCSSProperty) || value == CSSPropertyInvalid);
return static_cast<CSSPropertyID>(value);
}
inline CSSPropertyID resolveCSSPropertyID(CSSPropertyID id)
{
return convertToCSSPropertyID(id & ~512);
}
inline bool isPropertyAlias(CSSPropertyID id) { return id & 512; }
CSSPropertyID unresolvedCSSPropertyID(const WTF::String&);
CSSPropertyID cssPropertyID(const WTF::String&);
} // namespace blink
namespace WTF {
template<> struct DefaultHash<blink::CSSPropertyID> { typedef IntHash<unsigned> Hash; };
template<> struct HashTraits<blink::CSSPropertyID> : GenericHashTraits<blink::CSSPropertyID> {
static const bool emptyValueIsZero = true;
static void constructDeletedValue(blink::CSSPropertyID& slot, bool) { slot = static_cast<blink::CSSPropertyID>(blink::lastUnresolvedCSSProperty + 1); }
static bool isDeletedValue(blink::CSSPropertyID value) { return value == (blink::lastUnresolvedCSSProperty + 1); }
};
}
#endif // %(class_name)s_h
"""
GPERF_TEMPLATE = """
%%{
%(license)s
#include "config.h"
#include "%(class_name)s.h"
#include "core/css/HashTools.h"
#include <string.h>
#include "wtf/ASCIICType.h"
#include "wtf/text/AtomicString.h"
#include "wtf/text/WTFString.h"
namespace blink {
static const char propertyNameStringsPool[] = {
%(property_name_strings)s
};
static const unsigned short propertyNameStringsOffsets[] = {
%(property_name_offsets)s
};
%%}
%%struct-type
struct Property;
%%omit-struct-type
%%language=C++
%%readonly-tables
%%global-table
%%compare-strncmp
%%define class-name %(class_name)sHash
%%define lookup-function-name findPropertyImpl
%%define hash-function-name property_hash_function
%%define slot-name nameOffset
%%define word-array-name property_word_list
%%enum
%%%%
%(property_to_enum_map)s
%%%%
const Property* findProperty(register const char* str, register unsigned int len)
{
return %(class_name)sHash::findPropertyImpl(str, len);
}
const char* getPropertyName(CSSPropertyID id)
{
ASSERT(id >= firstCSSProperty && id <= lastUnresolvedCSSProperty);
int index = id - firstCSSProperty;
return propertyNameStringsPool + propertyNameStringsOffsets[index];
}
const AtomicString& getPropertyNameAtomicString(CSSPropertyID id)
{
ASSERT(id >= firstCSSProperty && id <= lastUnresolvedCSSProperty);
int index = id - firstCSSProperty;
static AtomicString* propertyStrings = new AtomicString[lastUnresolvedCSSProperty]; // Intentionally never destroyed.
AtomicString& propertyString = propertyStrings[index];
if (propertyString.isNull()) {
const char* propertyName = propertyNameStringsPool + propertyNameStringsOffsets[index];
propertyString = AtomicString(propertyName, strlen(propertyName), AtomicString::ConstructFromLiteral);
}
return propertyString;
}
String getPropertyNameString(CSSPropertyID id)
{
// We share the StringImpl with the AtomicStrings.
return getPropertyNameAtomicString(id).string();
}
String getJSPropertyName(CSSPropertyID id)
{
char result[maxCSSPropertyNameLength + 1];
const char* cssPropertyName = getPropertyName(id);
const char* propertyNamePointer = cssPropertyName;
if (!propertyNamePointer)
return emptyString();
char* resultPointer = result;
while (char character = *propertyNamePointer++) {
if (character == '-') {
char nextCharacter = *propertyNamePointer++;
if (!nextCharacter)
break;
character = (propertyNamePointer - 2 != cssPropertyName) ? toASCIIUpper(nextCharacter) : nextCharacter;
}
*resultPointer++ = character;
}
*resultPointer = '\\0';
return String(result);
}
CSSPropertyID cssPropertyID(const String& string)
{
return resolveCSSPropertyID(unresolvedCSSPropertyID(string));
}
} // namespace blink
"""
class CSSPropertyNamesWriter(css_properties.CSSProperties):
class_name = "CSSPropertyNames"
def __init__(self, in_file_path):
super(CSSPropertyNamesWriter, self).__init__(in_file_path)
self._outputs = {(self.class_name + ".h"): self.generate_header,
(self.class_name + ".cpp"): self.generate_implementation,
}
def _enum_declaration(self, property):
return " %(property_id)s = %(enum_value)s," % property
def generate_header(self):
return HEADER_TEMPLATE % {
'license': license.license_for_generated_cpp(),
'class_name': self.class_name,
'property_enums': "\n".join(map(self._enum_declaration, self._properties_including_aliases)),
'first_property_id': self._first_enum_value,
'properties_count': len(self._properties),
'last_property_id': self._first_enum_value + len(self._properties) - 1,
'last_unresolved_property_id': max(property["enum_value"] for property in self._properties_including_aliases),
'max_name_length': max(map(len, self._properties)),
}
def generate_implementation(self):
enum_value_to_name = {property['enum_value']: property['name'] for property in self._properties_including_aliases}
property_offsets = []
property_names = []
current_offset = 0
for enum_value in range(1, max(enum_value_to_name) + 1):
property_offsets.append(current_offset)
if enum_value in enum_value_to_name:
name = enum_value_to_name[enum_value]
property_names.append(name)
current_offset += len(name) + 1
css_name_and_enum_pairs = [(property['name'], property['property_id']) for property in self._properties_including_aliases]
gperf_input = GPERF_TEMPLATE % {
'license': license.license_for_generated_cpp(),
'class_name': self.class_name,
'property_name_strings': '\n'.join(' "%s\\0"' % name for name in property_names),
'property_name_offsets': '\n'.join(' %d,' % offset for offset in property_offsets),
'property_to_enum_map': '\n'.join('%s, %s' % property for property in css_name_and_enum_pairs),
}
# FIXME: If we could depend on Python 2.7, we would use subprocess.check_output
gperf_args = [self.gperf_path, '--key-positions=*', '-P', '-n']
gperf_args.extend(['-m', '50']) # Pick best of 50 attempts.
gperf_args.append('-D') # Allow duplicate hashes -> More compact code.
gperf = subprocess.Popen(gperf_args, stdin=subprocess.PIPE, stdout=subprocess.PIPE, universal_newlines=True)
return gperf.communicate(gperf_input)[0]
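# Rough shell equivalent of the gperf invocation above, for orientation only
# (the file names are hypothetical; the script itself pipes via stdin/stdout):
#
#     gperf --key-positions='*' -P -n -m 50 -D < CSSPropertyNames.gperf > CSSPropertyNames.cpp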
if __name__ == "__main__":
in_generator.Maker(CSSPropertyNamesWriter).main(sys.argv) | unknown | codeparrot/codeparrot-clean | ||
"""
This module is pending deprecation as of Django 1.6 and will be removed in
version 1.8.
"""
import json
import re
import unittest as real_unittest
import warnings
from django.db.models import get_app, get_apps
from django.test import _doctest as doctest
from django.test import runner
from django.test.utils import compare_xml, strip_quotes
from django.utils import unittest
from django.utils.importlib import import_module
from django.utils.module_loading import module_has_submodule
__all__ = ('DjangoTestSuiteRunner',)
warnings.warn(
"The django.test.simple module and DjangoTestSuiteRunner are deprecated; "
"use django.test.runner.DiscoverRunner instead.",
PendingDeprecationWarning)
# The module name for tests outside models.py
TEST_MODULE = 'tests'
normalize_long_ints = lambda s: re.sub(r'(?<![\w])(\d+)L(?![\w])', '\\1', s)
normalize_decimals = lambda s: re.sub(r"Decimal\('(\d+(\.\d*)?)'\)",
lambda m: "Decimal(\"%s\")" % m.groups()[0], s)
class OutputChecker(doctest.OutputChecker):
def check_output(self, want, got, optionflags):
"""
The entry method for doctest output checking. Defers to a sequence of
child checkers
"""
checks = (self.check_output_default,
self.check_output_numeric,
self.check_output_xml,
self.check_output_json)
for check in checks:
if check(want, got, optionflags):
return True
return False
def check_output_default(self, want, got, optionflags):
"""
The default comparator provided by doctest - not perfect, but good for
most purposes
"""
return doctest.OutputChecker.check_output(self, want, got, optionflags)
def check_output_numeric(self, want, got, optionflags):
"""Doctest does an exact string comparison of output, which means that
some numerically equivalent values aren't equal. This check normalizes
        * long integers (22L) so that they equal normal integers (22).
* Decimals so that they are comparable, regardless of the change
made to __repr__ in Python 2.6.
"""
return doctest.OutputChecker.check_output(self,
normalize_decimals(normalize_long_ints(want)),
normalize_decimals(normalize_long_ints(got)),
optionflags)
def check_output_xml(self, want, got, optionsflags):
try:
return compare_xml(want, got)
except Exception:
return False
def check_output_json(self, want, got, optionsflags):
"""
Tries to compare want and got as if they were JSON-encoded data
"""
want, got = strip_quotes(want, got)
try:
want_json = json.loads(want)
got_json = json.loads(got)
except Exception:
return False
return want_json == got_json
class DocTestRunner(doctest.DocTestRunner):
def __init__(self, *args, **kwargs):
doctest.DocTestRunner.__init__(self, *args, **kwargs)
self.optionflags = doctest.ELLIPSIS
doctestOutputChecker = OutputChecker()
def get_tests(app_module):
parts = app_module.__name__.split('.')
prefix, last = parts[:-1], parts[-1]
try:
test_module = import_module('.'.join(prefix + [TEST_MODULE]))
except ImportError:
# Couldn't import tests.py. Was it due to a missing file, or
# due to an import error in a tests.py that actually exists?
# app_module either points to a models.py file, or models/__init__.py
        # Tests are therefore either in the same directory, or one level up
if last == 'models':
app_root = import_module('.'.join(prefix))
else:
app_root = app_module
if not module_has_submodule(app_root, TEST_MODULE):
test_module = None
else:
# The module exists, so there must be an import error in the test
# module itself.
raise
return test_module
def make_doctest(module):
return doctest.DocTestSuite(module,
checker=doctestOutputChecker,
runner=DocTestRunner,
)
def build_suite(app_module):
"""
Create a complete Django test suite for the provided application module.
"""
suite = unittest.TestSuite()
# Load unit and doctests in the models.py module. If module has
# a suite() method, use it. Otherwise build the test suite ourselves.
if hasattr(app_module, 'suite'):
suite.addTest(app_module.suite())
else:
suite.addTest(unittest.defaultTestLoader.loadTestsFromModule(
app_module))
try:
suite.addTest(make_doctest(app_module))
except ValueError:
# No doc tests in models.py
pass
# Check to see if a separate 'tests' module exists parallel to the
# models module
test_module = get_tests(app_module)
if test_module:
# Load unit and doctests in the tests.py module. If module has
# a suite() method, use it. Otherwise build the test suite ourselves.
if hasattr(test_module, 'suite'):
suite.addTest(test_module.suite())
else:
suite.addTest(unittest.defaultTestLoader.loadTestsFromModule(
test_module))
try:
suite.addTest(make_doctest(test_module))
except ValueError:
# No doc tests in tests.py
pass
return suite
def build_test(label):
"""
Construct a test case with the specified label. Label should be of the
form model.TestClass or model.TestClass.test_method. Returns an
instantiated test or test suite corresponding to the label provided.
"""
parts = label.split('.')
if len(parts) < 2 or len(parts) > 3:
raise ValueError("Test label '%s' should be of the form app.TestCase "
"or app.TestCase.test_method" % label)
#
# First, look for TestCase instances with a name that matches
#
app_module = get_app(parts[0])
test_module = get_tests(app_module)
TestClass = getattr(app_module, parts[1], None)
# Couldn't find the test class in models.py; look in tests.py
if TestClass is None:
if test_module:
TestClass = getattr(test_module, parts[1], None)
try:
if issubclass(TestClass, (unittest.TestCase, real_unittest.TestCase)):
if len(parts) == 2: # label is app.TestClass
try:
return unittest.TestLoader().loadTestsFromTestCase(
TestClass)
except TypeError:
raise ValueError(
"Test label '%s' does not refer to a test class"
% label)
else: # label is app.TestClass.test_method
return TestClass(parts[2])
except TypeError:
# TestClass isn't a TestClass - it must be a method or normal class
pass
#
# If there isn't a TestCase, look for a doctest that matches
#
tests = []
for module in app_module, test_module:
try:
doctests = make_doctest(module)
# Now iterate over the suite, looking for doctests whose name
# matches the pattern that was given
for test in doctests:
if test._dt_test.name in (
'%s.%s' % (module.__name__, '.'.join(parts[1:])),
'%s.__test__.%s' % (
module.__name__, '.'.join(parts[1:]))):
tests.append(test)
except ValueError:
# No doctests found.
pass
# If no tests were found, then we were given a bad test label.
if not tests:
raise ValueError("Test label '%s' does not refer to a test" % label)
# Construct a suite out of the tests that matched.
return unittest.TestSuite(tests)
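# A small illustrative sketch (the "polls" app and test names are hypothetical):
# build_test() accepts labels of the form app.TestCase or app.TestCase.test_method,
# for example:
#
#     suite = build_test('polls.PollResultsTests')
#     test = build_test('polls.PollResultsTests.test_index')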
class DjangoTestSuiteRunner(runner.DiscoverRunner):
def build_suite(self, test_labels, extra_tests=None, **kwargs):
suite = unittest.TestSuite()
if test_labels:
for label in test_labels:
if '.' in label:
suite.addTest(build_test(label))
else:
app = get_app(label)
suite.addTest(build_suite(app))
else:
for app in get_apps():
suite.addTest(build_suite(app))
if extra_tests:
for test in extra_tests:
suite.addTest(test)
return runner.reorder_suite(suite, (unittest.TestCase,)) | unknown | codeparrot/codeparrot-clean | ||
//go:build !exclude_graphdriver_overlay2 && linux
package register
import (
// register the overlay2 graphdriver
_ "github.com/moby/moby/v2/daemon/graphdriver/overlay2"
) | go | github | https://github.com/moby/moby | daemon/graphdriver/register/register_overlay2.go |
/*
* TLS 1.3 key schedule
*
* Copyright The Mbed TLS Contributors
* SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later
*/
#if !defined(MBEDTLS_SSL_TLS1_3_KEYS_H)
#define MBEDTLS_SSL_TLS1_3_KEYS_H
/* This requires MBEDTLS_SSL_TLS1_3_LABEL( idx, name, string ) to be defined at
* the point of use. See e.g. the definition of mbedtls_ssl_tls13_labels_union
* below. */
#define MBEDTLS_SSL_TLS1_3_LABEL_LIST \
MBEDTLS_SSL_TLS1_3_LABEL(finished, "finished") \
MBEDTLS_SSL_TLS1_3_LABEL(resumption, "resumption") \
MBEDTLS_SSL_TLS1_3_LABEL(traffic_upd, "traffic upd") \
MBEDTLS_SSL_TLS1_3_LABEL(exporter, "exporter") \
MBEDTLS_SSL_TLS1_3_LABEL(key, "key") \
MBEDTLS_SSL_TLS1_3_LABEL(iv, "iv") \
MBEDTLS_SSL_TLS1_3_LABEL(c_hs_traffic, "c hs traffic") \
MBEDTLS_SSL_TLS1_3_LABEL(c_ap_traffic, "c ap traffic") \
MBEDTLS_SSL_TLS1_3_LABEL(c_e_traffic, "c e traffic") \
MBEDTLS_SSL_TLS1_3_LABEL(s_hs_traffic, "s hs traffic") \
MBEDTLS_SSL_TLS1_3_LABEL(s_ap_traffic, "s ap traffic") \
MBEDTLS_SSL_TLS1_3_LABEL(s_e_traffic, "s e traffic") \
MBEDTLS_SSL_TLS1_3_LABEL(e_exp_master, "e exp master") \
MBEDTLS_SSL_TLS1_3_LABEL(res_master, "res master") \
MBEDTLS_SSL_TLS1_3_LABEL(exp_master, "exp master") \
MBEDTLS_SSL_TLS1_3_LABEL(ext_binder, "ext binder") \
MBEDTLS_SSL_TLS1_3_LABEL(res_binder, "res binder") \
MBEDTLS_SSL_TLS1_3_LABEL(derived, "derived") \
MBEDTLS_SSL_TLS1_3_LABEL(client_cv, "TLS 1.3, client CertificateVerify") \
MBEDTLS_SSL_TLS1_3_LABEL(server_cv, "TLS 1.3, server CertificateVerify")
#define MBEDTLS_SSL_TLS1_3_CONTEXT_UNHASHED 0
#define MBEDTLS_SSL_TLS1_3_CONTEXT_HASHED 1
#define MBEDTLS_SSL_TLS1_3_PSK_EXTERNAL 0
#define MBEDTLS_SSL_TLS1_3_PSK_RESUMPTION 1
#if defined(MBEDTLS_SSL_PROTO_TLS1_3)
/* We need to tell the compiler that we meant to leave out the null character. */
#define MBEDTLS_SSL_TLS1_3_LABEL(name, string) \
const unsigned char name [sizeof(string) - 1] MBEDTLS_ATTRIBUTE_UNTERMINATED_STRING;
union mbedtls_ssl_tls13_labels_union {
MBEDTLS_SSL_TLS1_3_LABEL_LIST
};
struct mbedtls_ssl_tls13_labels_struct {
MBEDTLS_SSL_TLS1_3_LABEL_LIST
};
#undef MBEDTLS_SSL_TLS1_3_LABEL
extern const struct mbedtls_ssl_tls13_labels_struct mbedtls_ssl_tls13_labels;
#define MBEDTLS_SSL_TLS1_3_LBL_LEN(LABEL) \
sizeof(mbedtls_ssl_tls13_labels.LABEL)
#define MBEDTLS_SSL_TLS1_3_LBL_WITH_LEN(LABEL) \
mbedtls_ssl_tls13_labels.LABEL, \
MBEDTLS_SSL_TLS1_3_LBL_LEN(LABEL)
/* Maximum length of the label field in the HkdfLabel struct defined in
* RFC 8446, Section 7.1, excluding the "tls13 " prefix. */
#define MBEDTLS_SSL_TLS1_3_HKDF_LABEL_MAX_LABEL_LEN 249
/* The maximum length of HKDF contexts used in the TLS 1.3 standard.
* Since contexts are always hashes of message transcripts, this can
* be approximated from above by the maximum hash size. */
#define MBEDTLS_SSL_TLS1_3_KEY_SCHEDULE_MAX_CONTEXT_LEN \
PSA_HASH_MAX_SIZE
/* Maximum desired length for expanded key material generated
* by HKDF-Expand-Label. This algorithm can output up to 255 * hash_size
* bytes of key material where hash_size is the output size of the
* underlying hash function. */
#define MBEDTLS_SSL_TLS1_3_KEY_SCHEDULE_MAX_EXPANSION_LEN \
(255 * MBEDTLS_TLS1_3_MD_MAX_SIZE)
/**
* \brief The \c HKDF-Expand-Label function from
* the TLS 1.3 standard RFC 8446.
*
* <tt>
* HKDF-Expand-Label( Secret, Label, Context, Length ) =
* HKDF-Expand( Secret, HkdfLabel, Length )
* </tt>
*
* \param hash_alg The identifier for the hash algorithm to use.
* \param secret The \c Secret argument to \c HKDF-Expand-Label.
* This must be a readable buffer of length
* \p secret_len Bytes.
* \param secret_len The length of \p secret in Bytes.
* \param label The \c Label argument to \c HKDF-Expand-Label.
* This must be a readable buffer of length
* \p label_len Bytes.
* \param label_len The length of \p label in Bytes.
* \param ctx The \c Context argument to \c HKDF-Expand-Label.
* This must be a readable buffer of length \p ctx_len Bytes.
* \param ctx_len The length of \p context in Bytes.
* \param buf The destination buffer to hold the expanded secret.
* This must be a writable buffer of length \p buf_len Bytes.
* \param buf_len The desired size of the expanded secret in Bytes.
*
* \returns \c 0 on success.
* \return A negative error code on failure.
*/
MBEDTLS_CHECK_RETURN_CRITICAL
int mbedtls_ssl_tls13_hkdf_expand_label(
psa_algorithm_t hash_alg,
const unsigned char *secret, size_t secret_len,
const unsigned char *label, size_t label_len,
const unsigned char *ctx, size_t ctx_len,
unsigned char *buf, size_t buf_len);
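/*
 * Illustrative sketch only (the secret buffer name is hypothetical): deriving
 * a 32-byte "finished" key from a SHA-256 based handshake traffic secret with
 * an empty context could look like this.
 *
 *     unsigned char fin_key[32];
 *     int ret = mbedtls_ssl_tls13_hkdf_expand_label(
 *         PSA_ALG_SHA_256,
 *         hs_traffic_secret, 32,
 *         MBEDTLS_SSL_TLS1_3_LBL_WITH_LEN(finished),
 *         NULL, 0,
 *         fin_key, sizeof(fin_key));
 */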
/**
* \brief This function is part of the TLS 1.3 key schedule.
* It extracts key and IV for the actual client/server traffic
* from the client/server traffic secrets.
*
* From RFC 8446:
*
* <tt>
* [sender]_write_key = HKDF-Expand-Label(Secret, "key", "", key_length)
 * [sender]_write_iv = HKDF-Expand-Label(Secret, "iv", "", iv_length)
* </tt>
*
* \param hash_alg The identifier for the hash algorithm to be used
* for the HKDF-based expansion of the secret.
* \param client_secret The client traffic secret.
* This must be a readable buffer of size
* \p secret_len Bytes
* \param server_secret The server traffic secret.
* This must be a readable buffer of size
* \p secret_len Bytes
* \param secret_len Length of the secrets \p client_secret and
* \p server_secret in Bytes.
* \param key_len The desired length of the key to be extracted in Bytes.
* \param iv_len The desired length of the IV to be extracted in Bytes.
* \param keys The address of the structure holding the generated
* keys and IVs.
*
* \returns \c 0 on success.
* \returns A negative error code on failure.
*/
MBEDTLS_CHECK_RETURN_CRITICAL
int mbedtls_ssl_tls13_make_traffic_keys(
psa_algorithm_t hash_alg,
const unsigned char *client_secret,
const unsigned char *server_secret, size_t secret_len,
size_t key_len, size_t iv_len,
mbedtls_ssl_key_set *keys);
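/*
 * Illustrative sketch only (the secret buffers are hypothetical): for an
 * AES-128-GCM ciphersuite over SHA-256, key_len is 16 and iv_len is 12.
 *
 *     mbedtls_ssl_key_set keys;
 *     int ret = mbedtls_ssl_tls13_make_traffic_keys(
 *         PSA_ALG_SHA_256,
 *         client_hs_secret, server_hs_secret, 32,
 *         16, 12, &keys);
 */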
/**
* \brief The \c Derive-Secret function from the TLS 1.3 standard RFC 8446.
*
* <tt>
* Derive-Secret( Secret, Label, Messages ) =
* HKDF-Expand-Label( Secret, Label,
* Hash( Messages ),
* Hash.Length ) )
* </tt>
*
* \param hash_alg The identifier for the hash function used for the
* applications of HKDF.
* \param secret The \c Secret argument to the \c Derive-Secret function.
* This must be a readable buffer of length
* \p secret_len Bytes.
* \param secret_len The length of \p secret in Bytes.
* \param label The \c Label argument to the \c Derive-Secret function.
* This must be a readable buffer of length
* \p label_len Bytes.
* \param label_len The length of \p label in Bytes.
* \param ctx The hash of the \c Messages argument to the
* \c Derive-Secret function, or the \c Messages argument
* itself, depending on \p ctx_hashed.
* \param ctx_len The length of \p ctx in Bytes.
* \param ctx_hashed This indicates whether the \p ctx contains the hash of
* the \c Messages argument in the application of the
* \c Derive-Secret function
* (value MBEDTLS_SSL_TLS1_3_CONTEXT_HASHED), or whether
* it is the content of \c Messages itself, in which case
* the function takes care of the hashing
* (value MBEDTLS_SSL_TLS1_3_CONTEXT_UNHASHED).
* \param dstbuf The target buffer to write the output of
* \c Derive-Secret to. This must be a writable buffer of
 * size \p dstbuf_len Bytes.
* \param dstbuf_len The length of \p dstbuf in Bytes.
*
* \returns \c 0 on success.
* \returns A negative error code on failure.
*/
MBEDTLS_CHECK_RETURN_CRITICAL
int mbedtls_ssl_tls13_derive_secret(
psa_algorithm_t hash_alg,
const unsigned char *secret, size_t secret_len,
const unsigned char *label, size_t label_len,
const unsigned char *ctx, size_t ctx_len,
int ctx_hashed,
unsigned char *dstbuf, size_t dstbuf_len);
/**
* \brief Derive TLS 1.3 early data key material from early secret.
*
* This is a small wrapper invoking mbedtls_ssl_tls13_derive_secret()
* with the appropriate labels.
*
* <tt>
* Early Secret
* |
* +-----> Derive-Secret(., "c e traffic", ClientHello)
* | = client_early_traffic_secret
* |
* +-----> Derive-Secret(., "e exp master", ClientHello)
* . = early_exporter_master_secret
* .
* .
* </tt>
*
* \note To obtain the actual key and IV for the early data traffic,
 * the client secret derived by this function needs to be
* further processed by mbedtls_ssl_tls13_make_traffic_keys().
*
* \note The binder key, which is also generated from the early secret,
* is omitted here. Its calculation is part of the separate routine
* mbedtls_ssl_tls13_create_psk_binder().
*
* \param hash_alg The hash algorithm associated with the PSK for which
* early data key material is being derived.
* \param early_secret The early secret from which the early data key material
* should be derived. This must be a readable buffer whose
* length is the digest size of the hash algorithm
 * represented by \p hash_alg.
* \param transcript The transcript of the handshake so far, calculated with
* respect to \p hash_alg. This must be a readable buffer
* whose length is the digest size of the hash algorithm
 * represented by \p hash_alg.
* \param derived The address of the structure in which to store
* the early data key material.
*
* \returns \c 0 on success.
* \returns A negative error code on failure.
*/
MBEDTLS_CHECK_RETURN_CRITICAL
int mbedtls_ssl_tls13_derive_early_secrets(
psa_algorithm_t hash_alg,
unsigned char const *early_secret,
unsigned char const *transcript, size_t transcript_len,
mbedtls_ssl_tls13_early_secrets *derived);
/**
* \brief Derive TLS 1.3 handshake key material from the handshake secret.
*
* This is a small wrapper invoking mbedtls_ssl_tls13_derive_secret()
* with the appropriate labels from the standard.
*
* <tt>
* Handshake Secret
* |
* +-----> Derive-Secret( ., "c hs traffic",
* | ClientHello...ServerHello )
* | = client_handshake_traffic_secret
* |
* +-----> Derive-Secret( ., "s hs traffic",
* . ClientHello...ServerHello )
* . = server_handshake_traffic_secret
* .
* </tt>
*
* \note To obtain the actual key and IV for the encrypted handshake traffic,
 * the client and server secrets derived by this function need to be
* further processed by mbedtls_ssl_tls13_make_traffic_keys().
*
* \param hash_alg The hash algorithm associated with the ciphersuite
* that's being used for the connection.
* \param handshake_secret The handshake secret from which the handshake key
* material should be derived. This must be a readable
* buffer whose length is the digest size of the hash
 * algorithm represented by \p hash_alg.
* \param transcript The transcript of the handshake so far, calculated
* with respect to \p hash_alg. This must be a readable
* buffer whose length is the digest size of the hash
 * algorithm represented by \p hash_alg.
* \param derived The address of the structure in which to
* store the handshake key material.
*
* \returns \c 0 on success.
* \returns A negative error code on failure.
*/
MBEDTLS_CHECK_RETURN_CRITICAL
int mbedtls_ssl_tls13_derive_handshake_secrets(
psa_algorithm_t hash_alg,
unsigned char const *handshake_secret,
unsigned char const *transcript, size_t transcript_len,
mbedtls_ssl_tls13_handshake_secrets *derived);
/**
* \brief Derive TLS 1.3 application key material from the master secret.
*
* This is a small wrapper invoking mbedtls_ssl_tls13_derive_secret()
* with the appropriate labels from the standard.
*
* <tt>
* Master Secret
* |
* +-----> Derive-Secret( ., "c ap traffic",
* | ClientHello...server Finished )
* | = client_application_traffic_secret_0
* |
* +-----> Derive-Secret( ., "s ap traffic",
* | ClientHello...Server Finished )
* | = server_application_traffic_secret_0
* |
* +-----> Derive-Secret( ., "exp master",
* . ClientHello...server Finished)
* . = exporter_master_secret
* .
* </tt>
*
* \note To obtain the actual key and IV for the (0-th) application traffic,
 * the client and server secrets derived by this function need to be
* further processed by mbedtls_ssl_tls13_make_traffic_keys().
*
* \param hash_alg The hash algorithm associated with the ciphersuite
* that's being used for the connection.
* \param master_secret The master secret from which the application key
* material should be derived. This must be a readable
* buffer whose length is the digest size of the hash
 * algorithm represented by \p hash_alg.
* \param transcript The transcript of the handshake up to and including
* the ServerFinished message, calculated with respect
* to \p hash_alg. This must be a readable buffer whose
* length is the digest size of the hash algorithm
* represented by \p hash_alg.
* \param derived The address of the structure in which to
* store the application key material.
*
* \returns \c 0 on success.
* \returns A negative error code on failure.
*/
MBEDTLS_CHECK_RETURN_CRITICAL
int mbedtls_ssl_tls13_derive_application_secrets(
psa_algorithm_t hash_alg,
unsigned char const *master_secret,
unsigned char const *transcript, size_t transcript_len,
mbedtls_ssl_tls13_application_secrets *derived);
/**
* \brief Derive TLS 1.3 resumption master secret from the master secret.
*
* This is a small wrapper invoking mbedtls_ssl_tls13_derive_secret()
* with the appropriate labels from the standard.
*
* \param hash_alg The hash algorithm used in the application for which
* key material is being derived.
* \param application_secret The application secret from which the resumption master
* secret should be derived. This must be a readable
* buffer whose length is the digest size of the hash
 * algorithm represented by \p hash_alg.
* \param transcript The transcript of the handshake up to and including
* the ClientFinished message, calculated with respect
* to \p hash_alg. This must be a readable buffer whose
* length is the digest size of the hash algorithm
* represented by \p hash_alg.
* \param transcript_len The length of \p transcript in Bytes.
* \param derived The address of the structure in which to
* store the resumption master secret.
*
* \returns \c 0 on success.
* \returns A negative error code on failure.
*/
MBEDTLS_CHECK_RETURN_CRITICAL
int mbedtls_ssl_tls13_derive_resumption_master_secret(
psa_algorithm_t hash_alg,
unsigned char const *application_secret,
unsigned char const *transcript, size_t transcript_len,
mbedtls_ssl_tls13_application_secrets *derived);
/**
* \brief Compute the next secret in the TLS 1.3 key schedule
*
* The TLS 1.3 key schedule proceeds as follows to compute
* the three main secrets during the handshake: The early
* secret for early data, the handshake secret for all
* other encrypted handshake messages, and the master
* secret for all application traffic.
*
* <tt>
* 0
* |
* v
* PSK -> HKDF-Extract = Early Secret
* |
* v
* Derive-Secret( ., "derived", "" )
* |
* v
* (EC)DHE -> HKDF-Extract = Handshake Secret
* |
* v
* Derive-Secret( ., "derived", "" )
* |
* v
* 0 -> HKDF-Extract = Master Secret
* </tt>
*
* Each of the three secrets in turn is the basis for further
* key derivations, such as the derivation of traffic keys and IVs;
* see e.g. mbedtls_ssl_tls13_make_traffic_keys().
*
* This function implements one step in this evolution of secrets:
*
* <tt>
* old_secret
* |
* v
* Derive-Secret( ., "derived", "" )
* |
* v
* input -> HKDF-Extract = new_secret
* </tt>
*
* \param hash_alg The identifier for the hash function used for the
* applications of HKDF.
* \param secret_old The address of the buffer holding the old secret
* on function entry. If not \c NULL, this must be a
* readable buffer whose size matches the output size
* of the hash function represented by \p hash_alg.
* If \c NULL, an all \c 0 array will be used instead.
* \param input The address of the buffer holding the additional
* input for the key derivation (e.g., the PSK or the
* ephemeral (EC)DH secret). If not \c NULL, this must be
 * a readable buffer whose size is \p input_len Bytes.
* If \c NULL, an all \c 0 array will be used instead.
* \param input_len The length of \p input in Bytes.
* \param secret_new The address of the buffer holding the new secret
* on function exit. This must be a writable buffer
* whose size matches the output size of the hash
* function represented by \p hash_alg.
* This may be the same as \p secret_old.
*
* \returns \c 0 on success.
* \returns A negative error code on failure.
*/
MBEDTLS_CHECK_RETURN_CRITICAL
int mbedtls_ssl_tls13_evolve_secret(
psa_algorithm_t hash_alg,
const unsigned char *secret_old,
const unsigned char *input, size_t input_len,
unsigned char *secret_new);
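/*
 * Illustrative sketch only (the psk and ecdhe buffers are hypothetical, both
 * 32 bytes here): the three schedule stages described above correspond to
 * three calls that evolve the same buffer in place.
 *
 *     unsigned char secret[32] = { 0 };
 *     mbedtls_ssl_tls13_evolve_secret(PSA_ALG_SHA_256, NULL,   psk,   32, secret); // Early
 *     mbedtls_ssl_tls13_evolve_secret(PSA_ALG_SHA_256, secret, ecdhe, 32, secret); // Handshake
 *     mbedtls_ssl_tls13_evolve_secret(PSA_ALG_SHA_256, secret, NULL,  0,  secret); // Master
 */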
/**
* \brief Calculate a TLS 1.3 PSK binder.
*
* \param ssl The SSL context. This is used for debugging only and may
* be \c NULL if MBEDTLS_DEBUG_C is disabled.
* \param hash_alg The hash algorithm associated to the PSK \p psk.
* \param psk The buffer holding the PSK for which to create a binder.
* \param psk_len The size of \p psk in bytes.
* \param psk_type This indicates whether the PSK \p psk is externally
* provisioned (#MBEDTLS_SSL_TLS1_3_PSK_EXTERNAL) or a
* resumption PSK (#MBEDTLS_SSL_TLS1_3_PSK_RESUMPTION).
* \param transcript The handshake transcript up to the point where the
* PSK binder calculation happens. This must be readable,
* and its size must be equal to the digest size of
* the hash algorithm represented by \p hash_alg.
* \param result The address at which to store the PSK binder on success.
* This must be writable, and its size must be equal to the
* digest size of the hash algorithm represented by
* \p hash_alg.
*
* \returns \c 0 on success.
* \returns A negative error code on failure.
*/
MBEDTLS_CHECK_RETURN_CRITICAL
int mbedtls_ssl_tls13_create_psk_binder(mbedtls_ssl_context *ssl,
const psa_algorithm_t hash_alg,
unsigned char const *psk, size_t psk_len,
int psk_type,
unsigned char const *transcript,
unsigned char *result);
/**
 * \brief Set up an SSL transform structure representing the
 *        record protection mechanism used by TLS 1.3
*
* \param transform The SSL transform structure to be created. This must have
* been initialized through mbedtls_ssl_transform_init() and
* not used in any other way prior to calling this function.
* In particular, this function does not clean up the
* transform structure prior to installing the new keys.
* \param endpoint Indicates whether the transform is for the client
* (value #MBEDTLS_SSL_IS_CLIENT) or the server
* (value #MBEDTLS_SSL_IS_SERVER).
* \param ciphersuite The numerical identifier for the ciphersuite to use.
* This must be one of the identifiers listed in
* ssl_ciphersuites.h.
* \param traffic_keys The key material to use. No reference is stored in
* the SSL transform being generated, and the caller
* should destroy the key material afterwards.
* \param ssl (Debug-only) The SSL context to use for debug output
* in case of failure. This parameter is only needed if
* #MBEDTLS_DEBUG_C is set, and is ignored otherwise.
*
* \return \c 0 on success. In this case, \p transform is ready to
* be used with mbedtls_ssl_transform_decrypt() and
* mbedtls_ssl_transform_encrypt().
* \return A negative error code on failure.
*/
MBEDTLS_CHECK_RETURN_CRITICAL
int mbedtls_ssl_tls13_populate_transform(mbedtls_ssl_transform *transform,
int endpoint,
int ciphersuite,
mbedtls_ssl_key_set const *traffic_keys,
mbedtls_ssl_context *ssl);
/*
* TLS 1.3 key schedule evolutions
*
* Early -> Handshake -> Application
*
* Small wrappers around mbedtls_ssl_tls13_evolve_secret().
*/
/**
* \brief Begin TLS 1.3 key schedule by calculating early secret.
*
* The TLS 1.3 key schedule can be viewed as a simple state machine
* with states Initial -> Early -> Handshake -> Application, and
* this function represents the Initial -> Early transition.
*
* \param ssl The SSL context to operate on.
*
* \returns \c 0 on success.
* \returns A negative error code on failure.
*/
MBEDTLS_CHECK_RETURN_CRITICAL
int mbedtls_ssl_tls13_key_schedule_stage_early(mbedtls_ssl_context *ssl);
/**
* \brief Compute TLS 1.3 resumption master secret.
*
* \param ssl The SSL context to operate on. This must be in
* key schedule stage \c Application, see
* mbedtls_ssl_tls13_key_schedule_stage_application().
*
* \returns \c 0 on success.
* \returns A negative error code on failure.
*/
MBEDTLS_CHECK_RETURN_CRITICAL
int mbedtls_ssl_tls13_compute_resumption_master_secret(mbedtls_ssl_context *ssl);
/**
* \brief Calculate the verify_data value for the client or server TLS 1.3
* Finished message.
*
* \param ssl The SSL context to operate on. This must be in
* key schedule stage \c Handshake, see
* mbedtls_ssl_tls13_key_schedule_stage_application().
* \param dst The address at which to write the verify_data value.
* \param dst_len The size of \p dst in bytes.
* \param actual_len The address at which to store the amount of data
* actually written to \p dst upon success.
* \param which The message to calculate the `verify_data` for:
* - #MBEDTLS_SSL_IS_CLIENT for the Client's Finished message
* - #MBEDTLS_SSL_IS_SERVER for the Server's Finished message
*
* \note Both client and server call this function twice, once to
* generate their own Finished message, and once to verify the
* peer's Finished message.
* \returns \c 0 on success.
* \returns A negative error code on failure.
*/
MBEDTLS_CHECK_RETURN_CRITICAL
int mbedtls_ssl_tls13_calculate_verify_data(mbedtls_ssl_context *ssl,
unsigned char *dst,
size_t dst_len,
size_t *actual_len,
int which);
#if defined(MBEDTLS_SSL_EARLY_DATA)
/**
* \brief Compute TLS 1.3 early transform
*
* \param ssl The SSL context to operate on.
*
* \returns \c 0 on success.
* \returns A negative error code on failure.
*
* \warning The function does not compute the early master secret. Call
 *          mbedtls_ssl_tls13_key_schedule_stage_early() before calling
 *          this function to generate the early master secret.
* \note For a client/server endpoint, the function computes only the
* encryption/decryption part of the transform as the decryption/
* encryption part is not defined by the specification (no early
* traffic from the server to the client).
*/
MBEDTLS_CHECK_RETURN_CRITICAL
int mbedtls_ssl_tls13_compute_early_transform(mbedtls_ssl_context *ssl);
#endif /* MBEDTLS_SSL_EARLY_DATA */
/**
* \brief Compute TLS 1.3 handshake transform
*
* \param ssl The SSL context to operate on. The early secret must have been
* computed.
*
* \returns \c 0 on success.
* \returns A negative error code on failure.
*/
MBEDTLS_CHECK_RETURN_CRITICAL
int mbedtls_ssl_tls13_compute_handshake_transform(mbedtls_ssl_context *ssl);
/**
* \brief Compute TLS 1.3 application transform
*
* \param ssl The SSL context to operate on. The early secret must have been
* computed.
*
* \returns \c 0 on success.
* \returns A negative error code on failure.
*/
MBEDTLS_CHECK_RETURN_CRITICAL
int mbedtls_ssl_tls13_compute_application_transform(mbedtls_ssl_context *ssl);
#if defined(MBEDTLS_SSL_TLS1_3_KEY_EXCHANGE_MODE_SOME_PSK_ENABLED)
/**
* \brief Export TLS 1.3 PSK from handshake context
*
* \param[in] ssl The SSL context to operate on.
* \param[out] psk PSK output pointer.
* \param[out] psk_len Length of PSK.
*
* \returns \c 0 if there is a configured PSK and it was exported
* successfully.
* \returns A negative error code on failure.
*/
MBEDTLS_CHECK_RETURN_CRITICAL
int mbedtls_ssl_tls13_export_handshake_psk(mbedtls_ssl_context *ssl,
unsigned char **psk,
size_t *psk_len);
#endif
/**
* \brief Calculate TLS-Exporter function as defined in RFC 8446, Section 7.5.
*
* \param[in] hash_alg The hash algorithm.
* \param[in] secret The secret to use. (Should be the exporter master secret.)
* \param[in] secret_len Length of secret.
* \param[in] label The label of the exported key.
* \param[in] label_len The length of label.
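 * \param[in] context_value The context value for the exporter (may be empty).
 * \param[in] context_len The length of context_value.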
* \param[out] out The output buffer for the exported key. Must have room for at least out_len bytes.
* \param[in] out_len Length of the key to generate.
*/
int mbedtls_ssl_tls13_exporter(const psa_algorithm_t hash_alg,
const unsigned char *secret, const size_t secret_len,
const unsigned char *label, const size_t label_len,
const unsigned char *context_value, const size_t context_len,
uint8_t *out, const size_t out_len);
#endif /* MBEDTLS_SSL_PROTO_TLS1_3 */
#endif /* MBEDTLS_SSL_TLS1_3_KEYS_H */ | c | github | https://github.com/nodejs/node | deps/LIEF/third-party/mbedtls/library/ssl_tls13_keys.h |
# ----------------------------------------------------------------------------
# pyglet
# Copyright (c) 2006-2008 Alex Holkner
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * Neither the name of pyglet nor the names of its
# contributors may be used to endorse or promote products
# derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# ----------------------------------------------------------------------------
'''Interface classes for `pyglet.input`.
:since: pyglet 1.2
'''
__docformat__ = 'restructuredtext'
__version__ = '$Id: $'
import sys
from pyglet.event import EventDispatcher
_is_epydoc = hasattr(sys, 'is_epydoc') and sys.is_epydoc
class DeviceException(Exception):
pass
class DeviceOpenException(DeviceException):
pass
class DeviceExclusiveException(DeviceException):
pass
class Device(object):
'''Input device.
:Ivariables:
`display` : `Display`
Display this device is connected to.
`name` : str
Name of the device, as described by the device firmware.
`manufacturer` : str
Name of the device manufacturer, or ``None`` if the information is
not available.
'''
def __init__(self, display, name):
self.display = display
self.name = name
self.manufacturer = None
# TODO: make private
self.is_open = False
def open(self, window=None, exclusive=False):
'''Open the device to begin receiving input from it.
:Parameters:
`window` : Window
Optional window to associate with the device. The behaviour
            of this parameter is device and operating system dependent.
It can usually be omitted for most devices.
`exclusive` : bool
If ``True`` the device will be opened exclusively so that no
other application can use it. The method will raise
`DeviceExclusiveException` if the device cannot be opened this
way (for example, because another application has already
opened it).
'''
if self.is_open:
raise DeviceOpenException('Device is already open.')
self.is_open = True
def close(self):
'''Close the device.
'''
self.is_open = False
def get_controls(self):
'''Get a list of controls provided by the device.
:rtype: list of `Control`
'''
raise NotImplementedError('abstract')
def __repr__(self):
return '%s(name=%s)' % (self.__class__.__name__, self.name)
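# Hedged usage sketch (not part of the pyglet API): `device` stands for a
# concrete Device subclass instance supplied by a platform backend, and
# `window` is assumed to be a pyglet window; this module only defines the
# abstract interface.
def _example_open_device(device, window):
    try:
        device.open(window=window, exclusive=True)
    except DeviceExclusiveException:
        print('Another application already has exclusive use of the device.')
    except DeviceOpenException:
        print('The device is already open.')
    else:
        device.close()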
class Control(EventDispatcher):
'''Single value input provided by a device.
A control's value can be queried when the device is open. Event handlers
can be attached to the control to be called when the value changes.
The `min` and `max` properties are provided as advertised by the
device; in some cases the control's value will be outside this range.
:Ivariables:
`name` : str
Name of the control, or ``None`` if unknown
`raw_name` : str
Unmodified name of the control, as presented by the operating
system; or ``None`` if unknown.
`inverted` : bool
If ``True``, the value reported is actually inverted from what the
device reported; usually this is to provide consistency across
operating systems.
'''
_value = None
def __init__(self, name, raw_name=None):
self.name = name
self.raw_name = raw_name
self.inverted = False
def _get_value(self):
return self._value
def _set_value(self, value):
if value == self._value:
return
self._value = value
self.dispatch_event('on_change', value)
value = property(_get_value, doc='''Current value of the control.
The range of the value is device-dependent; for absolute controls
the range is given by ``min`` and ``max`` (however the value may exceed
this range); for relative controls the range is undefined.
:type: float''')
def __repr__(self):
if self.name:
return '%s(name=%s, raw_name=%s)' % (
self.__class__.__name__, self.name, self.raw_name)
else:
return '%s(raw_name=%s)' % (self.__class__.__name__, self.raw_name)
if _is_epydoc:
def on_change(self, value):
'''The value changed.
:Parameters:
`value` : float
Current value of the control.
:event:
'''
Control.register_event_type('on_change')
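# Hedged example of the on_change event registered above (illustrative only):
# handlers attached with the EventDispatcher `event` decorator fire whenever
# the control's value changes. The direct _set_value call stands in for what
# a platform backend would normally do internally.
def _example_control_on_change():
    control = Control('slider')

    @control.event
    def on_change(value):
        print('slider moved to', value)

    control._set_value(0.5)  # dispatches on_change(0.5)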
class RelativeAxis(Control):
'''An axis whose value represents a relative change from the previous
value.
'''
#: Name of the horizontal axis control
X = 'x'
#: Name of the vertical axis control
Y = 'y'
#: Name of the Z axis control.
Z = 'z'
#: Name of the rotational-X axis control
RX = 'rx'
#: Name of the rotational-Y axis control
RY = 'ry'
#: Name of the rotational-Z axis control
RZ = 'rz'
#: Name of the scroll wheel control
WHEEL = 'wheel'
def _get_value(self):
return self._value
def _set_value(self, value):
self._value = value
self.dispatch_event('on_change', value)
value = property(_get_value)
class AbsoluteAxis(Control):
'''An axis whose value represents a physical measurement from the device.
The value is advertised to range over ``min`` and ``max``.
:Ivariables:
`min` : float
Minimum advertised value.
`max` : float
Maximum advertised value.
'''
#: Name of the horizontal axis control
X = 'x'
#: Name of the vertical axis control
Y = 'y'
#: Name of the Z axis control.
Z = 'z'
#: Name of the rotational-X axis control
RX = 'rx'
#: Name of the rotational-Y axis control
RY = 'ry'
#: Name of the rotational-Z axis control
RZ = 'rz'
#: Name of the hat (POV) control, when a single control enumerates all of
#: the hat's positions.
HAT = 'hat'
#: Name of the hat's (POV's) horizontal control, when the hat position is
#: described by two orthogonal controls.
HAT_X = 'hat_x'
#: Name of the hat's (POV's) vertical control, when the hat position is
#: described by two orthogonal controls.
HAT_Y = 'hat_y'
def __init__(self, name, min, max, raw_name=None):
super(AbsoluteAxis, self).__init__(name, raw_name)
self.min = min
self.max = max
class Button(Control):
'''A control whose value is boolean.
'''
def _get_value(self):
return bool(self._value)
def _set_value(self, value):
if value == self._value:
return
self._value = value
self.dispatch_event('on_change', bool(value))
if value:
self.dispatch_event('on_press')
else:
self.dispatch_event('on_release')
value = property(_get_value)
if _is_epydoc:
def on_press(self):
'''The button was pressed.
:event:
'''
def on_release(self):
'''The button was released.
:event:
'''
Button.register_event_type('on_press')
Button.register_event_type('on_release')
class Joystick(EventDispatcher):
'''High-level interface for joystick-like devices. This includes analogue
and digital joysticks, gamepads, game controllers, and possibly even
steering wheels and other input devices. There is unfortunately no way to
distinguish between these different device types.
To use a joystick, first call `open`, then in your game loop examine
the values of `x`, `y`, and so on. These values are normalized to the
range [-1.0, 1.0].
To receive events when the value of an axis changes, attach an
on_joyaxis_motion event handler to the joystick. The `Joystick` instance,
axis name, and current value are passed as parameters to this event.
To handle button events, you should attach on_joybutton_press and
    on_joybutton_release event handlers to the joystick. Both the `Joystick`
instance and the index of the changed button are passed as parameters to
these events.
Alternately, you may attach event handlers to each individual button in
`button_controls` to receive on_press or on_release events.
To use the hat switch, attach an on_joyhat_motion event handler to the
joystick. The handler will be called with both the hat_x and hat_y values
whenever the value of the hat switch changes.
The device name can be queried to get the name of the joystick.
:Ivariables:
`device` : `Device`
The underlying device used by this joystick interface.
`x` : float
Current X (horizontal) value ranging from -1.0 (left) to 1.0
(right).
`y` : float
Current y (vertical) value ranging from -1.0 (top) to 1.0
(bottom).
`z` : float
Current Z value ranging from -1.0 to 1.0. On joysticks the Z
value is usually the throttle control. On game controllers the Z
value is usually the secondary thumb vertical axis.
`rx` : float
Current rotational X value ranging from -1.0 to 1.0.
`ry` : float
Current rotational Y value ranging from -1.0 to 1.0.
`rz` : float
Current rotational Z value ranging from -1.0 to 1.0. On joysticks
the RZ value is usually the twist of the stick. On game
controllers the RZ value is usually the secondary thumb horizontal
axis.
`hat_x` : int
Current hat (POV) horizontal position; one of -1 (left), 0
(centered) or 1 (right).
`hat_y` : int
Current hat (POV) vertical position; one of -1 (bottom), 0
(centered) or 1 (top).
`buttons` : list of bool
List of boolean values representing current states of the buttons.
These are in order, so that button 1 has value at ``buttons[0]``,
and so on.
`x_control` : `AbsoluteAxis`
Underlying control for `x` value, or ``None`` if not available.
`y_control` : `AbsoluteAxis`
Underlying control for `y` value, or ``None`` if not available.
`z_control` : `AbsoluteAxis`
Underlying control for `z` value, or ``None`` if not available.
`rx_control` : `AbsoluteAxis`
Underlying control for `rx` value, or ``None`` if not available.
`ry_control` : `AbsoluteAxis`
Underlying control for `ry` value, or ``None`` if not available.
`rz_control` : `AbsoluteAxis`
Underlying control for `rz` value, or ``None`` if not available.
`hat_x_control` : `AbsoluteAxis`
Underlying control for `hat_x` value, or ``None`` if not available.
`hat_y_control` : `AbsoluteAxis`
Underlying control for `hat_y` value, or ``None`` if not available.
`button_controls` : list of `Button`
Underlying controls for `buttons` values.
'''
def __init__(self, device):
self.device = device
self.x = 0
self.y = 0
self.z = 0
self.rx = 0
self.ry = 0
self.rz = 0
self.hat_x = 0
self.hat_y = 0
self.buttons = []
self.x_control = None
self.y_control = None
self.z_control = None
self.rx_control = None
self.ry_control = None
self.rz_control = None
self.hat_x_control = None
self.hat_y_control = None
self.button_controls = []
def add_axis(control):
name = control.name
scale = 2.0 / (control.max - control.min)
bias = -1.0 - control.min * scale
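            # Linear map of the advertised [min, max] range onto [-1.0, 1.0]:
            # value * scale + bias gives -1.0 at control.min and +1.0 at
            # control.max (the sign flip below reverses this for inverted
            # controls).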
if control.inverted:
scale = -scale
bias = -bias
setattr(self, name + '_control', control)
@control.event
def on_change(value):
normalized_value = value * scale + bias
setattr(self, name, normalized_value)
self.dispatch_event('on_joyaxis_motion', self, name, normalized_value)
def add_button(control):
i = len(self.buttons)
self.buttons.append(False)
self.button_controls.append(control)
@control.event
def on_change(value):
self.buttons[i] = value
@control.event
def on_press():
self.dispatch_event('on_joybutton_press', self, i)
@control.event
def on_release():
self.dispatch_event('on_joybutton_release', self, i)
def add_hat(control):
# 8-directional hat encoded as a single control (Windows/Mac)
self.hat_x_control = control
self.hat_y_control = control
@control.event
def on_change(value):
if value & 0xffff == 0xffff:
self.hat_x = self.hat_y = 0
else:
if control.max > 8: # DirectInput: scale value
value //= 0xfff
if 0 <= value < 8:
self.hat_x, self.hat_y = (
( 0, 1),
( 1, 1),
( 1, 0),
( 1, -1),
( 0, -1),
(-1, -1),
(-1, 0),
(-1, 1),
)[value]
else:
# Out of range
self.hat_x = self.hat_y = 0
self.dispatch_event('on_joyhat_motion', self, self.hat_x, self.hat_y)
for control in device.get_controls():
if isinstance(control, AbsoluteAxis):
if control.name in ('x', 'y', 'z', 'rx', 'ry', 'rz',
'hat_x', 'hat_y'):
add_axis(control)
elif control.name == 'hat':
add_hat(control)
elif isinstance(control, Button):
add_button(control)
def open(self, window=None, exclusive=False):
'''Open the joystick device. See `Device.open`.
'''
self.device.open(window, exclusive)
def close(self):
'''Close the joystick device. See `Device.close`.
'''
self.device.close()
def on_joyaxis_motion(self, joystick, axis, value):
'''The value of a joystick axis changed.
:Parameters:
`joystick` : `Joystick`
The joystick device whose axis changed.
`axis` : string
The name of the axis that changed.
`value` : float
The current value of the axis, normalized to [-1, 1].
'''
def on_joybutton_press(self, joystick, button):
'''A button on the joystick was pressed.
:Parameters:
`joystick` : `Joystick`
The joystick device whose button was pressed.
`button` : int
The index (in `button_controls`) of the button that was pressed.
'''
def on_joybutton_release(self, joystick, button):
'''A button on the joystick was released.
:Parameters:
`joystick` : `Joystick`
The joystick device whose button was released.
`button` : int
The index (in `button_controls`) of the button that was released.
'''
def on_joyhat_motion(self, joystick, hat_x, hat_y):
'''The value of the joystick hat switch changed.
:Parameters:
`joystick` : `Joystick`
The joystick device whose hat control changed.
`hat_x` : int
Current hat (POV) horizontal position; one of -1 (left), 0
(centered) or 1 (right).
`hat_y` : int
Current hat (POV) vertical position; one of -1 (bottom), 0
(centered) or 1 (top).
'''
Joystick.register_event_type('on_joyaxis_motion')
Joystick.register_event_type('on_joybutton_press')
Joystick.register_event_type('on_joybutton_release')
Joystick.register_event_type('on_joyhat_motion')
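# Hedged usage sketch for the event-driven style described in the Joystick
# docstring (illustrative only): `joystick` is assumed to be a Joystick
# obtained from a platform backend, for example via pyglet.input.get_joysticks().
def _example_joystick_events(joystick):
    joystick.open()

    @joystick.event
    def on_joyaxis_motion(joystick, axis, value):
        print(axis, 'is now', value)  # value is normalized to [-1.0, 1.0]

    @joystick.event
    def on_joybutton_press(joystick, button):
        print('button', button, 'pressed')

    @joystick.event
    def on_joyhat_motion(joystick, hat_x, hat_y):
        print('hat at', hat_x, hat_y)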
class AppleRemote(EventDispatcher):
'''High-level interface for Apple remote control.
This interface provides access to the 6 button controls on the remote.
Pressing and holding certain buttons on the remote is interpreted as
a separate control.
:Ivariables:
`device` : `Device`
The underlying device used by this interface.
`left_control` : `Button`
Button control for the left (prev) button.
`left_hold_control` : `Button`
Button control for holding the left button (rewind).
`right_control` : `Button`
Button control for the right (next) button.
`right_hold_control` : `Button`
Button control for holding the right button (fast forward).
`up_control` : `Button`
Button control for the up (volume increase) button.
`down_control` : `Button`
Button control for the down (volume decrease) button.
`select_control` : `Button`
Button control for the select (play/pause) button.
`select_hold_control` : `Button`
Button control for holding the select button.
`menu_control` : `Button`
Button control for the menu button.
`menu_hold_control` : `Button`
Button control for holding the menu button.
'''
def __init__(self, device):
def add_button(control):
setattr(self, control.name + '_control', control)
@control.event
def on_press():
self.dispatch_event('on_button_press', control.name)
@control.event
def on_release():
self.dispatch_event('on_button_release', control.name)
self.device = device
for control in device.get_controls():
if control.name in ('left', 'left_hold', 'right', 'right_hold', 'up', 'down',
'menu', 'select', 'menu_hold', 'select_hold'):
add_button(control)
def open(self, window=None, exclusive=False):
'''Open the device. See `Device.open`.
'''
self.device.open(window, exclusive)
def close(self):
'''Close the device. See `Device.close`.
'''
self.device.close()
def on_button_press(self, button):
"""A button on the remote was pressed.
Only the 'up' and 'down' buttons will generate an event when the
button is first pressed. All other buttons on the remote will wait
until the button is released and then send both the press and release
events at the same time.
:Parameters:
`button` : unicode
The name of the button that was pressed. The valid names are
'up', 'down', 'left', 'right', 'left_hold', 'right_hold',
'menu', 'menu_hold', 'select', and 'select_hold'
:event:
"""
def on_button_release(self, button):
"""A button on the remote was released.
The 'select_hold' and 'menu_hold' button release events are sent
immediately after the corresponding press events regardless of
whether or not the user has released the button.
:Parameters:
`button` : unicode
The name of the button that was released. The valid names are
'up', 'down', 'left', 'right', 'left_hold', 'right_hold',
'menu', 'menu_hold', 'select', and 'select_hold'
:event:
"""
AppleRemote.register_event_type('on_button_press')
AppleRemote.register_event_type('on_button_release')
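# Hedged usage sketch (illustrative only): `remote` is assumed to be an
# AppleRemote instance from a platform backend (for example
# pyglet.input.get_apple_remote() on OS X), and `window` a pyglet window.
def _example_apple_remote_events(remote, window):
    remote.open(window=window, exclusive=True)

    @remote.event
    def on_button_press(button):
        print('remote button pressed:', button)

    @remote.event
    def on_button_release(button):
        print('remote button released:', button)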
class Tablet(object):
'''High-level interface to tablet devices.
Unlike other devices, tablets must be opened for a specific window,
and cannot be opened exclusively. The `open` method returns a
`TabletCanvas` object, which supports the events provided by the tablet.
Currently only one tablet device can be used, though it can be opened on
multiple windows. If more than one tablet is connected, the behaviour is
undefined.
'''
def open(self, window):
'''Open a tablet device for a window.
:Parameters:
`window` : `Window`
The window on which the tablet will be used.
:rtype: `TabletCanvas`
'''
raise NotImplementedError('abstract')
class TabletCanvas(EventDispatcher):
'''Event dispatcher for tablets.
Use `Tablet.open` to obtain this object for a particular tablet device and
window. Events may be generated even if the tablet stylus is outside of
the window; this is operating-system dependent.
The events each provide the `TabletCursor` that was used to generate the
event; for example, to distinguish between a stylus and an eraser. Only
one cursor can be used at a time, otherwise the results are undefined.
:Ivariables:
`window` : Window
The window on which this tablet was opened.
'''
# OS X: Active window receives tablet events only when cursor is in window
# Windows: Active window receives all tablet events
#
# Note that this means enter/leave pairs are not always consistent (normal
# usage).
def __init__(self, window):
self.window = window
def close(self):
'''Close the tablet device for this window.
'''
raise NotImplementedError('abstract')
if _is_epydoc:
def on_enter(self, cursor):
'''A cursor entered the proximity of the window. The cursor may
be hovering above the tablet surface, but outside of the window
bounds, or it may have entered the window bounds.
Note that you cannot rely on `on_enter` and `on_leave` events to
be generated in pairs; some events may be lost if the cursor was
out of the window bounds at the time.
:Parameters:
`cursor` : `TabletCursor`
The cursor that entered proximity.
:event:
'''
def on_leave(self, cursor):
'''A cursor left the proximity of the window. The cursor may have
moved too high above the tablet surface to be detected, or it may
have left the bounds of the window.
Note that you cannot rely on `on_enter` and `on_leave` events to
be generated in pairs; some events may be lost if the cursor was
out of the window bounds at the time.
:Parameters:
`cursor` : `TabletCursor`
The cursor that left proximity.
:event:
'''
def on_motion(self, cursor, x, y, pressure):
'''The cursor moved on the tablet surface.
If `pressure` is 0, then the cursor is actually hovering above the
tablet surface, not in contact.
:Parameters:
`cursor` : `TabletCursor`
The cursor that moved.
`x` : int
The X position of the cursor, in window coordinates.
`y` : int
The Y position of the cursor, in window coordinates.
`pressure` : float
The pressure applied to the cursor, in range 0.0 (no
pressure) to 1.0 (full pressure).
`tilt_x` : float
Currently undefined.
`tilt_y` : float
Currently undefined.
:event:
'''
TabletCanvas.register_event_type('on_enter')
TabletCanvas.register_event_type('on_leave')
TabletCanvas.register_event_type('on_motion')
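# Hedged usage sketch (illustrative only): `tablet` is assumed to be a Tablet
# from a platform backend (for example via pyglet.input.get_tablets()), and
# `window` a pyglet window; Tablet.open returns the TabletCanvas whose events
# are registered above.
def _example_tablet_motion(tablet, window):
    canvas = tablet.open(window)

    @canvas.event
    def on_motion(cursor, x, y, pressure):
        print(cursor.name, 'at', x, y, 'pressure', pressure)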
class TabletCursor(object):
'''A distinct cursor used on a tablet.
    Most tablets support at least a *stylus* and an *eraser* cursor; this
object is used to distinguish them when tablet events are generated.
:Ivariables:
`name` : str
Name of the cursor.
'''
# TODO well-defined names for stylus and eraser.
def __init__(self, name):
self.name = name
def __repr__(self):
return '%s(%s)' % (self.__class__.__name__, self.name) | unknown | codeparrot/codeparrot-clean | ||
"""Support for ISY994 fans."""
import logging
from typing import Callable
from homeassistant.components.fan import (
DOMAIN,
SPEED_HIGH,
SPEED_LOW,
SPEED_MEDIUM,
SPEED_OFF,
SUPPORT_SET_SPEED,
FanEntity,
)
from homeassistant.helpers.typing import ConfigType
from . import ISY994_NODES, ISY994_PROGRAMS, ISYDevice
_LOGGER = logging.getLogger(__name__)
VALUE_TO_STATE = {
0: SPEED_OFF,
63: SPEED_LOW,
64: SPEED_LOW,
190: SPEED_MEDIUM,
191: SPEED_MEDIUM,
255: SPEED_HIGH,
}
STATE_TO_VALUE = {state: value for value, state in VALUE_TO_STATE.items()}
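# Note: several ISY values share a speed name, so the inversion above keeps
# the last value seen for each speed. With the table as defined this gives
# SPEED_OFF -> 0, SPEED_LOW -> 64, SPEED_MEDIUM -> 191 and SPEED_HIGH -> 255;
# set_speed() below falls back to 255 (high) for unknown speed names.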
def setup_platform(
hass, config: ConfigType, add_entities: Callable[[list], None], discovery_info=None
):
"""Set up the ISY994 fan platform."""
devices = []
for node in hass.data[ISY994_NODES][DOMAIN]:
devices.append(ISYFanDevice(node))
for name, status, actions in hass.data[ISY994_PROGRAMS][DOMAIN]:
devices.append(ISYFanProgram(name, status, actions))
add_entities(devices)
class ISYFanDevice(ISYDevice, FanEntity):
"""Representation of an ISY994 fan device."""
@property
def speed(self) -> str:
"""Return the current speed."""
return VALUE_TO_STATE.get(self.value)
@property
def is_on(self) -> bool:
"""Get if the fan is on."""
return self.value != 0
def set_speed(self, speed: str) -> None:
"""Send the set speed command to the ISY994 fan device."""
self._node.on(val=STATE_TO_VALUE.get(speed, 255))
def turn_on(self, speed: str = None, **kwargs) -> None:
"""Send the turn on command to the ISY994 fan device."""
self.set_speed(speed)
def turn_off(self, **kwargs) -> None:
"""Send the turn off command to the ISY994 fan device."""
self._node.off()
@property
def speed_list(self) -> list:
"""Get the list of available speeds."""
return [SPEED_OFF, SPEED_LOW, SPEED_MEDIUM, SPEED_HIGH]
@property
def supported_features(self) -> int:
"""Flag supported features."""
return SUPPORT_SET_SPEED
class ISYFanProgram(ISYFanDevice):
"""Representation of an ISY994 fan program."""
def __init__(self, name: str, node, actions) -> None:
"""Initialize the ISY994 fan program."""
super().__init__(node)
self._name = name
self._actions = actions
    def turn_off(self, **kwargs) -> None:
        """Send the turn off command to ISY994 fan program."""
        if not self._actions.runThen():
            _LOGGER.error("Unable to turn off the fan")
    def turn_on(self, speed: str = None, **kwargs) -> None:
        """Send the turn on command to ISY994 fan program."""
        if not self._actions.runElse():
            _LOGGER.error("Unable to turn on the fan")
@property
def supported_features(self) -> int:
"""Flag supported features."""
return 0 | unknown | codeparrot/codeparrot-clean | ||
// Copyright IBM Corp. 2016, 2025
// SPDX-License-Identifier: BUSL-1.1
//go:build !enterprise
package audit
import "fmt"
// validate ensures that if we're not running Vault Enterprise, we cannot
// supply Enterprise-only audit configuration options.
func (c *BackendConfig) validate() error {
if HasInvalidOptions(c.Config) {
return fmt.Errorf("enterprise-only options supplied: %w", ErrExternalOptions)
}
return nil
} | go | github | https://github.com/hashicorp/vault | audit/backend_config_ce.go |