code stringlengths 1 25.8M | language stringclasses 18 values | source stringclasses 4 values | repo stringclasses 78 values | path stringlengths 0 268 |
|---|---|---|---|---|
#! /usr/bin/env python
# repeated_stimulation.py
#
# Copyright (C) 2010 The NEST Initiative
"""
Simple example for how to repeat a stimulation protocol
using the 'origin' property of devices.
In this example, a poisson_generator generates a spike train that is
recorded directly by a spike_detector, using the following paradigm:
1. A single trial last for 1000ms.
2. Within each trial, the poisson_generator is active from 100ms to 500ms.
We achieve this by defining the 'start' and 'stop' properties of the
generator to 100ms and 500ms, respectively, and setting the 'origin' to the
simulation time at the beginning of each trial. Start and stop are interpreted
relative to the origin.
"""
import cynest as nest
# parameters
rate = 1000.0 # generator rate in spikes/s
start = 100.0 # start of simulation relative to trial start, in ms
stop = 500.0 # end of simulation relative to trial start, in ms
trial_duration = 1000.0 # trial duration, in ms
num_trials = 5 # number of trials to perform
# set up network
nest.ResetKernel()
pg = nest.Create('poisson_generator',
params = {'rate' : rate,
'start' : start,
'stop' : stop}
)
sd = nest.Create('spike_detector')
nest.Connect(pg, sd)
# before each trial, we set the 'origin' of the poisson_generator to the current
# simulation time
for n in xrange(num_trials):
nest.SetStatus(pg, {'origin': nest.GetKernelStatus()['time']})
nest.Simulate(trial_duration)
# now plot the result, including a histogram
# note: The histogram will show spikes seemingly located before 100ms into
# each trial. This is due to sub-optimal automatic placement of histogram bin borders.
import cynest.raster_plot
nest.raster_plot.from_device(sd, hist=True, hist_binwidth=100.,
title='Repeated stimulation by Poisson generator')
nest.raster_plot.show() | unknown | codeparrot/codeparrot-clean | ||
from __future__ import print_function
import base64
import logging
import os
import unittest
import urllib
import urllib2
import urlparse
import wptserve
# Route wptserve's internal logging through the stdlib logging machinery.
logging.basicConfig()
wptserve.logger.set_logger(logging.getLogger())

# Serve test documents from the 'docroot' directory next to this file.
here = os.path.split(__file__)[0]
doc_root = os.path.join(here, "docroot")
class Request(urllib2.Request):
    """urllib2.Request subclass that allows the HTTP method to be overridden.

    urllib2 normally chooses GET or POST automatically; the tests need to
    issue arbitrary methods (PUT, DELETE, ...), so the method is stored
    explicitly and reported via get_method().
    """

    def __init__(self, *args, **kwargs):
        urllib2.Request.__init__(self, *args, **kwargs)
        # Default method; overwritten by TestUsingServer.request().
        self.method = "GET"

    def get_method(self):
        # urllib2 calls this to decide which HTTP method to send.
        return self.method

    def add_data(self, data):
        # Accept a dict-like body and form-encode it (Python 2 dicts
        # expose iteritems).
        if hasattr(data, "iteritems"):
            data = urllib.urlencode(data)
        print(data)  # NOTE(review): debugging leftover -- consider removing
        self.add_header("Content-Length", str(len(data)))
        urllib2.Request.add_data(self, data)
class TestUsingServer(unittest.TestCase):
    """Base test case that starts a fresh local wptserve instance per test."""

    def setUp(self):
        # Port 0 lets the OS pick a free port; the chosen port is read back
        # from self.server when building URLs in abs_url().
        self.server = wptserve.server.WebTestHttpd(host="localhost",
                                                   port=0,
                                                   use_ssl=False,
                                                   certificate=None,
                                                   doc_root=doc_root)
        self.server.start(False)

    def tearDown(self):
        self.server.stop()

    def abs_url(self, path, query=None):
        # Build an absolute http:// URL for a path on the running test server.
        return urlparse.urlunsplit(("http", "%s:%i" % (self.server.host, self.server.port), path, query, None))

    def request(self, path, query=None, method="GET", headers=None, body=None, auth=None):
        """Issue an HTTP request against the test server and return the response.

        auth, if given, is a (user, password) tuple sent as HTTP Basic auth.
        """
        req = Request(self.abs_url(path, query))
        req.method = method
        if headers is None:
            headers = {}
        for name, value in headers.iteritems():
            req.add_header(name, value)
        if body is not None:
            req.add_data(body)
        if auth is not None:
            req.add_header("Authorization", "Basic %s" % base64.b64encode('%s:%s' % auth))
return urllib2.urlopen(req) | unknown | codeparrot/codeparrot-clean | ||
import sys
import time
from numpy import *
# Detect IronPython so the .NET garbage collector can be driven explicitly
# in collect(); sys.subversion is an implementation-name tuple on IronPython.
UsingIronPython = False
if sys.subversion[0] == 'IronPython':
    import System
    UsingIronPython = True
#import numbers
#from random import random
class Complex(object):
    """Minimal complex-number value type used by the object-array benchmarks.

    Stores the real and imaginary parts privately and supports equality,
    addition, subtraction and multiplication with other Complex instances.

    Fix: the original comparison and arithmetic operators accessed
    other.__r unconditionally and raised AttributeError when the other
    operand was not a Complex (e.g. ``Complex(1, 2) == 5``); they now
    return NotImplemented so Python falls back to its default handling.
    """

    def __init__(self, r, i):
        # r: real part, i: imaginary part (any numeric type).
        self.__r = r
        self.__i = i

    def __eq__(self, other):
        if not isinstance(other, Complex):
            return NotImplemented
        return (self.__r == other.__r) and (self.__i == other.__i)

    def __ne__(self, other):
        result = self.__eq__(other)
        if result is NotImplemented:
            return result
        return not result

    def __add__(self, other):
        if not isinstance(other, Complex):
            return NotImplemented
        return Complex(self.__r + other.__r, self.__i + other.__i)

    def __sub__(self, other):
        if not isinstance(other, Complex):
            return NotImplemented
        return Complex(self.__r - other.__r, self.__i - other.__i)

    def __mul__(self, other):
        # (a + bi)(c + di) = (ac - bd) + (ad + bc)i
        if not isinstance(other, Complex):
            return NotImplemented
        return Complex(self.__r * other.__r - self.__i * other.__i,
                       self.__r * other.__i + self.__i * other.__r)

    def __str__(self):
        return "(%f, %f)" % (self.__r, self.__i)
# Array lengths exercised by every benchmark; this prints the CSV header row.
sizes = (10, 100, 1000, 10000, 100000, 1000000)
print "sizes,", ",".join([str(s) for s in sizes])
def random():
    """Deterministic stand-in for random.random().

    Always yields the same value so that benchmark runs are repeatable;
    the resulting "random walk" data is therefore not random at all and
    the distribution could be improved.
    """
    fixed_sample = 0.25
    return fixed_sample
def creationTest(iters):
    """Time ndarray allocation plus a flat fill, 'iters' times per size.

    Per-size wall times are written into the module-level 'times' dict and
    then printed as one CSV row.
    """
    for size in sizes:
        t0 = time.clock()
        a = 0
        for i in xrange(iters):
            a = ndarray(size)
            a.flat = i  # touch every element so allocation is not a no-op
        t1 = time.clock()
        times[size] = t1-t0
    print "creation,", ",".join([str(t) for (s, t) in sorted(times.items())])
def viewCreationTest(iters):
    """Time creation of a slice view (a[1:-1]) over an existing array."""
    for size in sizes:
        t0 = time.clock()
        a = ndarray(size)
        for i in xrange(iters):
            b = a[1:-1]
            #b.Dispose()
        t1 = time.clock()
        times[size] = t1-t0
    print "view creation,", ",".join([str(t) for (s, t) in sorted(times.items())])
def basicData():
    """Build three parallel lists of arrays, one entry per benchmark size.

    Returns (tens, twenties, results): arrays filled with 10s, arrays
    filled with 20s, and uninitialised output buffers of matching size.
    """
    tens = []
    twenties = []
    results = []
    for size in sizes:
        a = ndarray(size)
        a[:] = 10
        # Spot-check both ends of the fill before trusting the data.
        if a[0] != 10 or a[size-1] != 10:
            print "Error: 'tens' array not initialized correctly (%s, %s)." % (a[0], a[size-1])
        tens.append(a)
        a = ndarray(size)
        a[:] = 20
        twenties.append(a)
        a = ndarray(size)  # uninitialised output buffer
        results.append(a)
    return tens, twenties, results
def multiplyTest(iters):
tens, twenties, results = basicData()
for i, size in enumerate(sizes):
a = tens[i]
b = twenties[i]
c = results[i]
t0 = time.clock()
for j in xrange(longIter):
multiply(a, b, c)
t1 = time.clock()
times[size] = t1-t0
if c[0] != 200:
print "Error: multiply produced incorrect value for c[0] (%s, expected 200)." % c[0]
if c[size-1] != 200:
print "Error: multiply produced incorect value for c[-1] (%s, expected 200)." % c[size-1]
print "multiply,", ",".join([str(t) for (s, t) in sorted(times.items())])
def addTest(iters):
    """Time a pure-Python element-by-element add loop (no vectorisation)."""
    tens, twenties, results = basicData()
    for i, size in enumerate(sizes):
        a = tens[i]
        b = twenties[i]
        c = results[i]
        t0 = time.clock()
        for j in xrange(iters):
            for k in xrange(size):
                c[k] = a[k] + b[k]
        t1 = time.clock();
        times[size] = t1-t0
        # Sanity-check the sum at both ends: 10 + 20 == 30.
        if c[0] != 30 or c[size-1] != 30:
            print "Error: add produced incorrect values for c[0], c[-1]: %s, %s expected 30, 30" % (c[0], c[size-1])
    print "add,", ",".join([str(t) for (s, t) in sorted(times.items())])
def derivativeTest(iters):
    """Time a finite-difference derivative over a (pseudo) random-walk array."""
    # Derivative test
    for size in sizes:
        a = ndarray(size)
        a[0] = 1.0
        for j in range(1, size):
            a[j] = a[j-1] + random() - 0.5 # Simple random walk
        dt = 1.0
        # Time the derivative calc.
        tmp = ndarray(size-1)
        t0 = time.clock()
        dx = ndarray(size-1)
        for j in range(iters):
            try:
                subtract(a[1:], a[:-1], tmp)
                #dx = divide(tmp, dt, dx)
                dx = tmp / dt
            except Exception as e:
                print "j = %s, tmp = %d\ndt = %s" % (j, len(tmp), dt)
                # NOTE(review): a bare 'raise' would preserve the traceback
                # on Python 3; 're-raise e' kept for Python 2 compatibility.
                raise e
        t1 = time.clock()
        times[size] = t1-t0
        # Drop references so the arrays can be collected between sizes.
        a = 0
        dx = 0
    print "derivative," , ",".join([str(t) for (s, t) in sorted(times.items())])
def convolutionTest(iters):
# Convolution.
for i in range(1, len(sizes)):
size = sizes[i]
size2 = sizes[0]
a = array(size)
b = array(size2)
a[0] = 1.0
b[0] = 1.0
for j in range(1, size):
a[j] = 1.0 #a[i] + random() - 0.5 # Simple random walk
for j in range(1, size2):
b[j] = 1.0 #b[j] + random() - 0.5
P, Q, N = len(a), len(b), len(a)+len(b)-1
r1 = range(iters)
r2 = range(N-1)
t0 = time.clock()
z = ndarray(N, 1)
#aa = a[0:9]
#bb = b[0:9]
tmp = ndarray(9, 1)
for j in r1:
for k in r2:
lower, upper = max(0, k-(Q-1)), min(P-1, k)
if lower <> upper and upper-lower == 9:
aa = a[lower:upper]
bb = b[k-upper:k-lower]
#tmp = aa * bb
multiply(aa, bb, tmp)
z[k] = tmp[0]
#z[k] = (a[lower:upper] * b[k-upper:k-lower])[0]
#tmp.Dispose()
#bb.Dispose()
#aa.Dispose()
t1 = time.clock()
times[size] = t1-t0
print "convolution,", ",".join([str(t) for (s, t) in sorted(times.items())])
def objectArrayTest(iters):
    """Time subtract() over arrays of Complex objects (IronPython only).

    Uses the arrays as context managers ('with ndarray(...)'), which the
    IronPython binding presumably supports for deterministic disposal --
    standard NumPy ndarrays are not context managers, so this is guarded
    off in the driver below.
    """
    # Test again with object types.
    for i, size in enumerate(sizes[:-1]):
        with ndarray(size, 0) as a:
            with ndarray(size, 0) as b:
                with ndarray(size, 0) as c:
                    bValue = Complex(5.2, -3.2)
                    for p in xrange(size):
                        a[p] = Complex(14.2*i, 1.2*i+5.0)
                        b[p] = bValue
                    t0 = time.clock()
                    for j in xrange(iters):
                        subtract(a, b, c)
                    t1 = time.clock()
                    times[size] = t1-t0
    print "object subtract,", ",".join([str(t) for (s, t) in sorted(times.items())])
def collect():
    """Force a full .NET garbage collection when running under IronPython.

    No-op on CPython; between benchmarks this keeps GC pauses from one
    test from being charged to the next.
    """
    if UsingIronPython:
        t0 = time.clock()
        System.GC.Collect()
        System.GC.WaitForPendingFinalizers()
        t1 = time.clock()
        print "Garbage collection time: %s" % (t1-t0)
# Benchmark driver: run each test three times. 'times' is the shared
# per-size result dict that every *Test function writes into before
# printing its CSV row.
longIter = 100
for k in range(3):
    times = {}
    #time.sleep(10);
    #print "Starting creation test."
    creationTest(longIter)
    collect()
    viewCreationTest(10000)
    collect()
    #print "Multiply test"
    multiplyTest(longIter)
    collect()
    #print "Add test"
    addTest(5)
    collect()
    #print "Derivative test"
    derivativeTest(2000)
    collect()
    #convolutionTest(5)
    collect()
    # Disabled ('if 0'): object-array test, IronPython-only code path.
    if 0 and UsingIronPython:
        objectArrayTest(20)
collect() | unknown | codeparrot/codeparrot-clean | ||
import { compile } from '../src'
// SSR codegen tests for <suspense>: the compiler must emit an
// _ssrRenderSuspense call whose slot functions mirror the authored template.
describe('ssr compile: suspense', () => {
// A bare child with no <template> wrapper becomes the implicit default slot.
test('implicit default', () => {
expect(compile(`<suspense><foo/></suspense>`).code).toMatchInlineSnapshot(`
"const { resolveComponent: _resolveComponent, withCtx: _withCtx } = require("vue")
const { ssrRenderComponent: _ssrRenderComponent, ssrRenderSuspense: _ssrRenderSuspense } = require("vue/server-renderer")
return function ssrRender(_ctx, _push, _parent, _attrs) {
const _component_foo = _resolveComponent("foo")
_ssrRenderSuspense(_push, {
default: () => {
_push(_ssrRenderComponent(_component_foo, null, null, _parent))
},
_: 1 /* STABLE */
})
}"
`)
})
// Explicit #default and #fallback templates map to the matching slot keys.
test('explicit slots', () => {
expect(
compile(`<suspense>
<template #default>
<foo/>
</template>
<template #fallback>
loading...
</template>
</suspense>`).code,
).toMatchInlineSnapshot(`
"const { resolveComponent: _resolveComponent, withCtx: _withCtx } = require("vue")
const { ssrRenderComponent: _ssrRenderComponent, ssrRenderSuspense: _ssrRenderSuspense } = require("vue/server-renderer")
return function ssrRender(_ctx, _push, _parent, _attrs) {
const _component_foo = _resolveComponent("foo")
_ssrRenderSuspense(_push, {
default: () => {
_push(_ssrRenderComponent(_component_foo, null, null, _parent))
},
fallback: () => {
_push(\` loading... \`)
},
_: 1 /* STABLE */
})
}"
`)
})
}) | typescript | github | https://github.com/vuejs/core | packages/compiler-ssr/__tests__/ssrSuspense.spec.ts |
# ===========================================================================
# Copyright 2013 University of Limerick
#
# This file is part of DREAM.
#
# DREAM is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# DREAM is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with DREAM. If not, see <http://www.gnu.org/licenses/>.
# ===========================================================================
# See http://peak.telecommunity.com/DevCenter/setuptools#namespace-packages
# Declare this package as a namespace package so that multiple
# distributions can contribute modules under the same package name.
try:
    # Prefer setuptools' namespace-package support when it is installed.
    __import__('pkg_resources').declare_namespace(__name__)
except ImportError:
    # setuptools unavailable: fall back to the stdlib pkgutil mechanism.
    from pkgutil import extend_path
__path__ = extend_path(__path__, __name__) | unknown | codeparrot/codeparrot-clean | ||
##
## This file is part of the libsigrokdecode project.
##
## Copyright (C) 2012 Uwe Hermann <uwe@hermann-uwe.de>
##
## This program is free software; you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation; either version 2 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with this program; if not, write to the Free Software
## Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
##
'''
UART (Universal Asynchronous Receiver Transmitter) is a simple serial
communication protocol which allows two devices to talk to each other.
This decoder should work on all "UART-like" async protocols with one
start bit (0), 5-9 databits, an (optional) parity bit, and one or more
stop bits (1), in this order.
It can be run on one signal line (RX or TX) only, or on two lines (RX + TX).
There are various standards for the physical layer specification of the
signals, including RS232, (TTL) UART, RS485, and others. However, the logic
level of the respective pins is only relevant when acquiring the data via
a logic analyzer (you have to select the correct logic analyzer and/or
the correct place where to probe). Once the data is in digital form and
matches the "UART" description above, this protocol decoder can work with
it though, no matter whether the source was on TTL UART levels, or RS232,
or others.
'''
from .pd import Decoder | unknown | codeparrot/codeparrot-clean | ||
#!/usr/bin/env python
"""
Module to define matching template for checklist spreadsheet
"""
__author__ = "Graham Klyne (GK@ACM.ORG)"
__copyright__ = "Copyright 2011-2013, University of Oxford"
__license__ = "MIT (http://opensource.org/licenses/MIT)"
from gridmatch import (
GridMatchReport, GridMatchError, GridMatch,
text, anyval, regexval, refval, intval, save, value, error, trace
)
# Cell that marks a worksheet as a checklist definition sheet.
checklist_start = value("matchtemplate", "checklist")

# Prefix declarations: one (prefix, URI) row per namespace prefix.
prefix = text("") + regexval(r"\w+", "prefix") + refval("uri")
prefixes = (text("Prefixes:").skipdownto()
    // text("Prefixes:")
    // prefix.repeatdown("prefixes", min=1, dkey="prefix", dval="uri")
    )

# Checklist rows: target URI template, purpose label and model reference.
# NOTE(review): this 'checklist' binding is shadowed by the whole-sheet
# matcher assigned to the same name at the bottom of this module; only
# 'checklists' below captures this row matcher.
checklist = text("") + regexval(r".+", "target_urit") + anyval("purpose") + refval("model")
checklists = (text("Checklists:").skipdownto()
    // text("Checklists:")
    // checklist.repeatdown("checklists", min=1)
    )

# Model sections: MUST/SHOULD/MAY item rows under "Model:"/"Items:" headers.
itemlevel = save("level") + (text("MUST") | text("SHOULD") | text("MAY"))
checkitem = anyval("seq") + itemlevel + refval("reqid")
model = ( text("Model:").skipdownto()
    // (text("Model:") + refval("modelid"))
    // text("Items:")
    // checkitem.repeatdown("items")
    )
models = model.repeatdown("models", min=1)

# Rule bodies: a rule is either a ForEach query (with optional modifiers),
# a bare Exists test, or a software (command/response) probe.
matchforeach = ( (text("ForEach:") + regexval(".+", "foreach"))
    // (text("ResultMod:") + regexval(".+", "result_mod")).optional()
    // (text("Exists:") + regexval(".+", "exists")).optional()
    // (text("Aggregates:") + regexval(".+", "aggregates")).optional()
    // (text("IsLive:") + regexval(".+", "islive")).optional()
    // (text("Min:") + intval("min")).optional()
    // (text("Max:") + intval("max")).optional()
    )
matchexists = text("Exists:") + regexval(".+", "exists")
matchsoftware = ( (text("Command:") + anyval("command"))
    // (text("Response:") + anyval("response"))
    )
rulebody = ( matchforeach
    | matchexists
    | matchsoftware
    | error("No rule body found")
    )

# Collect directives: "?var as: ?list" pairs gathered always/on-pass/on-fail.
# NOTE(review): the "\?\w+" patterns rely on '\?' passing through a non-raw
# string literal unchanged; raw strings (r"\?\w+") would be safer, since
# unrecognised escape sequences are deprecated in Python 3.
collectvarlist = ( ( regexval("\?\w+", "collectvar") + text("as:") + regexval("\?\w+", "collectlist") )
    | trace("collectvarlist not matched")
    )
collectall = ( text("Collect:") + collectvarlist )
collectpass = ( text("CollectPass:") + collectvarlist )
collectfail = ( text("CollectFail:") + collectvarlist )
collectvars = ( collectall.repeatdown("collectall")
    // collectpass.repeatdown("collectpass")
    // collectfail.repeatdown("collectfail")
    )

# Diagnostic messages reported when a rule passes, fails or does not apply.
rulediag = ( (text("Pass:") + anyval("pass"))
    // (text("Fail:") + anyval("fail"))
    // (text("None:") + anyval("miss")).optional()
    )

# A requirement: a "Rule:" header followed by body, collectors, diagnostics.
requirement = ( text("Rule:").skipdownto()
    // (text("Rule:") + refval("reqid"))
    // (text("") + (rulebody // collectvars // rulediag))
    )
requirements = requirement.repeatdown("requirements", min=1)

# Sheet terminator row.
checklist_end = text("End:").skipdownto() // text("End:")

# The complete checklist-sheet matcher (rebinds 'checklist'; see note above).
checklist = ( checklist_start
    // prefixes
    // checklists
    // models
    // requirements
    // checklist_end
    )
# Example data matched by the above:
#
# Prefixes:,Prefix,URI,,,@@NOTE: there is a shortcoming in the present Minim model and implementation that means there is no way to add new prefixes to those predefined in the minim evaluation code. Noted as technical debt fix.,
# ,rdf,http://www.w3.org/1999/02/22-rdf-syntax-ns#,,,,
# ,rdfs,http://www.w3.org/2000/01/rdf-schema#,,,,
# ,owl,http://www.w3.org/2002/07/owl#,,,,
# ,xsd,http://www.w3.org/2001/XMLSchema#,,,,
# ,xml,http://www.w3.org/XML/1998/namespace,,,,
# ,rdfg,http://www.w3.org/2004/03/trix/rdfg-1/,,,,
# ,ore,http://www.openarchives.org/ore/terms/,,,,
# ,ao,http://purl.org/ao/,,,,
# ,dcterms,http://purl.org/dc/terms/,,,,
# ,foaf,http://xmlns.com/foaf/0.1/,,,,
# ,ro,http://purl.org/wf4ever/ro#,,,,
# ,wfprov,http://purl.org/wf4ever/wfprov#,,,,
# ,wfdesc,http://purl.org/wf4ever/wfdesc#,,,,
# ,wf4ever,http://purl.org/wf4ever/wf4ever#,,,,
# ,minim,http://purl.org/minim/minim#,,,,
# Checklists:,Target,Purpose,Model,,Description,
# ,{+targetro},ready-to-release,#experiment_complete_model,,Checklist to be satisfied if the target RO is to be considered a complete and fully-described workflow experiment.,
# ,{+targetro},wf-accessible,#wf_accessible_model,,Checklist to test workflow accessible item in isolation,
# ,,,,,,
# Model:,#experiment_complete_model,,,,This model defines information that must be satisfied by the target RO for the target RO to be considered a complete and fully-described workflow experiment.,
# Items:,Level,Rule,,,,
# 010,SHOULD,#RO_has_hypothesys,,,RO should contain a resource describing the hypothesis the experiment is intended to test,
# 020,SHOULD,#RO_has_sketch,,,RO should contain a resource that is a high level sketch of the workflow that is used to test the hypothesys,
# 030,MUST,#WF_accessible,,,The RO must contain an accessible workflow definition,
# 040,MUST,#WF_services_accessible,,,All services used by the workflow must be live,
# 050,MUST,#RO_has_inputdata,,,The RO must specify input data that is used by the workflow,
# 060,SHOULD,#RO_has_conclusion,,,The RO should contain a resource that describes outcomes and conclusions obtained by running the workflow. ,
# Model:,#wf_accessible_model,,,,Model to test workflow accessible item in isolation,
# Items:,Level,Rule,,,,
# 030,MUST,#WF_accessible,,,The RO must contain an accessible workflow definition
# Define rules to test individual requirements,,,,,
# Rule:,#RO_has_hypothesys,,,,
# ,Exists:,?hypothesis rdf:type roterms:Hypothesis,,,
# ,Pass:,Experiment hypothesis is present,,,
# ,Fail:,Experiment hypothesis is not present,,,
# Rule:,#RO_has_sketch,,,,
# ,Exists:,?sketch rdf:type roterms:Sketch,,,
# ,Pass:,Workflow design sketch is present,,,
# ,Fail:,Workflow design sketch is not present,,,
# Rule:,#WF_accessible,,,,
# ,ForEach:,"?wf rdf:type wfdesc:Workflow ;
# rdfs:label ?wflab ;
# wfdesc:hasWorkflowDefinition ?wfdef",,,
# ,IsLive:,{+wfdef},,,
# ,Pass:,All workflow definitions are accessible,,,
# ,Fail:,The definition for workflow <i>%(wflab)s</i> is not accessible,,,
# ,None:,No workflow definitions are present,,,
# Rule:,#WF_services_accessible,,,,
# ,ForEach:,"?pr rdf:type wfdesc:Process ;
# rdfs:label ?prlab .
# { ?pr wf4ever:serviceURI ?pruri }
# UNION
# { ?pr wf4ever:wsdlURI ?pruri }",,,
# ,IsLive:,{+pruri},,,
# ,Pass:,All web services used by workflows are accessible,,,
# ,Fail:,"One or more web services used by a workflow are inaccessible, including <a href=""%(pruri)s""><i>%(prlab)s</i></a>",,,
# ,None:,No web services are referenced by any workflow,,,
# Rule:,#RO_has_inputdata,,,,
# ,Exists:,?wfbundle roterms:inputSelected ?inputdata,,,
# ,Pass:,Input data is present,,,
# ,Fail:,Input data is not present,,,
# Rule:,#RO_has_conclusion,,,,
# ,Exists:,?conclusion rdf:type roterms:Conclusions,,,
# ,Pass:,Experiment conclusions are present,,,
# ,Fail:,Experiment conclusions are not present,,,
# End:,,,,,
# End. | unknown | codeparrot/codeparrot-clean | ||
import time
from indy import anoncreds, wallet
import json
import logging
from indy import pool
from src.utils import run_coroutine, PROTOCOL_VERSION
logger = logging.getLogger(__name__)
async def demo():
    """End-to-end anoncreds walkthrough: issue a credential, then prove and
    verify a claim from it.

    Plays all three roles (issuer, prover, verifier) in a single process,
    using the local 'store' dict as a stand-in for ledger lookups when
    resolving schemas and credential definitions by id.
    """
    logger.info("Anoncreds sample -> started")

    issuer = {
        'did': 'NcYxiDXkpYi6ov5FcYDi1e',
        'wallet_config': json.dumps({'id': 'issuer_wallet'}),
        'wallet_credentials': json.dumps({'key': 'issuer_wallet_key'})
    }
    # NOTE(review): the prover wallet key reuses 'issuer_wallet_key' --
    # looks like a copy/paste; harmless for a demo but worth confirming.
    prover = {
        'did': 'VsKV7grR1BUE29mG2Fm2kX',
        'wallet_config': json.dumps({"id": "prover_wallet"}),
        'wallet_credentials': json.dumps({"key": "issuer_wallet_key"})
    }
    verifier = {}
    store = {}  # in-memory substitute for ledger schema/cred-def resolution

    # Set protocol version 2 to work with Indy Node 1.4
    await pool.set_protocol_version(PROTOCOL_VERSION)

    # 1. Create Issuer Wallet and Get Wallet Handle
    await wallet.create_wallet(issuer['wallet_config'], issuer['wallet_credentials'])
    issuer['wallet'] = await wallet.open_wallet(issuer['wallet_config'], issuer['wallet_credentials'])

    # 2. Create Prover Wallet and Get Wallet Handle
    await wallet.create_wallet(prover['wallet_config'], prover['wallet_credentials'])
    prover['wallet'] = await wallet.open_wallet(prover['wallet_config'], prover['wallet_credentials'])

    # 3. Issuer create Credential Schema
    schema = {
        'name': 'gvt',
        'version': '1.0',
        'attributes': '["age", "sex", "height", "name"]'
    }
    issuer['schema_id'], issuer['schema'] = await anoncreds.issuer_create_schema(issuer['did'], schema['name'],
                                                                                schema['version'],
                                                                                schema['attributes'])
    store[issuer['schema_id']] = issuer['schema']

    # 4. Issuer create Credential Definition for Schema (revocation disabled)
    cred_def = {
        'tag': 'cred_def_tag',
        'type': 'CL',
        'config': json.dumps({"support_revocation": False})
    }
    issuer['cred_def_id'], issuer['cred_def'] = await anoncreds.issuer_create_and_store_credential_def(
        issuer['wallet'], issuer['did'], issuer['schema'], cred_def['tag'], cred_def['type'], cred_def['config'])
    store[issuer['cred_def_id']] = issuer['cred_def']

    # 5. Prover create Master Secret
    prover['master_secret_id'] = await anoncreds.prover_create_master_secret(prover['wallet'], None)

    # 6. Issuer create Credential Offer (handed to the prover directly here)
    issuer['cred_offer'] = await anoncreds.issuer_create_credential_offer(issuer['wallet'], issuer['cred_def_id'])
    prover['cred_offer'] = issuer['cred_offer']

    # Prover resolves the schema/cred-def referenced by the offer.
    cred_offer = json.loads(prover['cred_offer'])
    prover['cred_def_id'] = cred_offer['cred_def_id']
    prover['schema_id'] = cred_offer['schema_id']
    prover['cred_def'] = store[prover['cred_def_id']]
    prover['schema'] = store[prover['schema_id']]

    # 7. Prover create Credential Request
    prover['cred_req'], prover['cred_req_metadata'] = \
        await anoncreds.prover_create_credential_req(prover['wallet'], prover['did'], prover['cred_offer'],
                                                     prover['cred_def'], prover['master_secret_id'])

    # 8. Issuer create Credential
    # Values follow the anoncreds raw/encoded attribute convention.
    prover['cred_values'] = json.dumps({
        "sex": {"raw": "male", "encoded": "5944657099558967239210949258394887428692050081607692519917050011144233"},
        "name": {"raw": "Alex", "encoded": "1139481716457488690172217916278103335"},
        "height": {"raw": "175", "encoded": "175"},
        "age": {"raw": "28", "encoded": "28"}
    })
    issuer['cred_values'] = prover['cred_values']
    issuer['cred_req'] = prover['cred_req']

    (cred_json, _, _) = await anoncreds.issuer_create_credential(issuer['wallet'], issuer['cred_offer'],
                                                                 issuer['cred_req'], issuer['cred_values'], None, None)
    prover['cred'] = cred_json

    # 9. Prover store Credential
    await anoncreds.prover_store_credential(prover['wallet'], None, prover['cred_req_metadata'], prover['cred'],
                                            prover['cred_def'], None)

    # 10. Verifier builds Proof Request: reveal 'name', prove age >= 18.
    nonce = await anoncreds.generate_nonce()
    verifier['proof_req'] = json.dumps({
        'nonce': nonce,
        'name': 'proof_req_1',
        'version': '0.1',
        'requested_attributes': {
            'attr1_referent': {'name': 'name'}
        },
        'requested_predicates': {
            'predicate1_referent': {'name': 'age', 'p_type': '>=', 'p_value': 18}
        }
    })
    prover['proof_req'] = verifier['proof_req']

    # Prover gets Credentials for Proof Request
    prover['cred_search_handle'] = \
        await anoncreds.prover_search_credentials_for_proof_req(prover['wallet'], prover['proof_req'], None)

    # Prover gets Credentials for attr1_referent
    creds_for_attr1 = await anoncreds.prover_fetch_credentials_for_proof_req(prover['cred_search_handle'],
                                                                             'attr1_referent', 10)
    prover['cred_for_attr1'] = json.loads(creds_for_attr1)[0]['cred_info']

    # Prover gets Credentials for predicate1_referent
    creds_for_predicate1 = await anoncreds.prover_fetch_credentials_for_proof_req(prover['cred_search_handle'],
                                                                                  'predicate1_referent', 10)
    prover['cred_for_predicate1'] = json.loads(creds_for_predicate1)[0]['cred_info']

    await anoncreds.prover_close_credentials_search_for_proof_req(prover['cred_search_handle'])

    # 11. Prover create Proof for Proof Request
    prover['requested_creds'] = json.dumps({
        'self_attested_attributes': {},
        'requested_attributes': {'attr1_referent': {'cred_id': prover['cred_for_attr1']['referent'], 'revealed': True}},
        'requested_predicates': {'predicate1_referent': {'cred_id': prover['cred_for_predicate1']['referent']}}
    })

    schemas_json = json.dumps({prover['schema_id']: json.loads(prover['schema'])})
    cred_defs_json = json.dumps({prover['cred_def_id']: json.loads(prover['cred_def'])})
    revoc_states_json = json.dumps({})  # empty: revocation disabled above

    prover['proof'] = await anoncreds.prover_create_proof(prover['wallet'], prover['proof_req'],
                                                          prover['requested_creds'],
                                                          prover['master_secret_id'], schemas_json, cred_defs_json,
                                                          revoc_states_json)
    verifier['proof'] = prover['proof']

    # 12. Verifier verify proof
    proof = json.loads(verifier['proof'])
    assert 'Alex' == proof['requested_proof']['revealed_attrs']['attr1_referent']['raw']

    # Resolve the schema/cred-def identifiers cited by the proof.
    identifier = proof['identifiers'][0]
    verifier['cred_def_id'] = identifier['cred_def_id']
    verifier['schema_id'] = identifier['schema_id']
    verifier['cred_def'] = store[verifier['cred_def_id']]
    verifier['schema'] = store[verifier['schema_id']]

    schemas_json = json.dumps({verifier['schema_id']: json.loads(verifier['schema'])})
    cred_defs_json = json.dumps({verifier['cred_def_id']: json.loads(verifier['cred_def'])})
    revoc_ref_defs_json = "{}"
    revoc_regs_json = "{}"

    assert await anoncreds.verifier_verify_proof(verifier['proof_req'], verifier['proof'], schemas_json, cred_defs_json,
                                                 revoc_ref_defs_json, revoc_regs_json)

    # 13. Close and delete Issuer wallet
    await wallet.close_wallet(issuer['wallet'])
    await wallet.delete_wallet(issuer['wallet_config'], issuer['wallet_credentials'])

    # 14. Close and delete Prover wallet
    await wallet.close_wallet(prover['wallet'])
    await wallet.delete_wallet(prover['wallet_config'], prover['wallet_credentials'])

    logger.info("Anoncreds sample -> completed")
if __name__ == '__main__':
    # Drive the async demo to completion via the project's coroutine runner.
    run_coroutine(demo)
time.sleep(1) # FIXME waiting for libindy thread complete | unknown | codeparrot/codeparrot-clean | ||
"""
Given a continuous time first order transfer function of the form:
n1 * s + n0
-----------
s + d0
Compute the Tustin approximation and return a state space realization of this
discrete time transfer function.
"""
from sympy import symbols, Poly, ccode, S, sqrt
def discrete_realization_tustin(n0, n1, d0, T):
z = symbols('z')
s = 2/T*(z-1)/(z+1)
num = ((n1*s + n0)*T*(z + 1)).simplify()
den = ((s + d0)*T*(z + 1)).simplify()
num_poly = Poly(num, z)
den_poly = Poly(den, z)
n1_z, n0_z = num_poly.coeffs()
d1_z, d0_z = den_poly.coeffs()
# Make denominator monic and divide numerator appropriately
n1_z /= d1_z
n0_z /= d1_z
d0_z /= d1_z
a = -d0_z
b_times_c = (n0_z - n1_z * d0_z).simplify()
d = n1_z
return a, b_times_c, d
n0, n1, d0, T = symbols('n0 n1 d0 T')
#T = 0.0013
#n0 = 1.23
#n1 = 4.56
#d0 = 7.89
a, b_times_c, d = discrete_realization_tustin(n0, n1, d0, T)
#a, b_times_c, d = discrete_realization_zoh(n0, n1, d0, T)
a_str = ccode(a)
b_times_c_str = ccode(b_times_c)
d_str = ccode(d)
print(a_str)
print(b_times_c_str)
print(d_str) | unknown | codeparrot/codeparrot-clean | ||
#!/usr/bin/python
#
# Copyright (c) 2008 Steven Watanabe
#
# Distributed under the Boost Software License, Version 1.0. (See
# accompanying file LICENSE_1_0.txt) or copy at
# http://www.boost.org/LICENSE_1_0.txt)
import BoostBuild
tester = BoostBuild.Tester()
################################################################################
#
# Test without giving the project an explicit id.
#
################################################################################
tester.write("jamroot.jam", """
lib test : test.cpp ;
project : requirements <library>test ;
build-project a ;
""")
tester.write("test.cpp", """
#ifdef _WIN32
__declspec(dllexport)
#endif
void foo() {}
""")
tester.write("a/test1.cpp", """
int main() {}
""")
tester.write("a/jamfile.jam", """
exe test1 : test1.cpp ;
""")
tester.run_build_system()
tester.expect_addition("bin/$toolset/debug/test.obj")
tester.expect_addition("a/bin/$toolset/debug/test1.exe")
tester.rm("bin")
tester.rm("a/bin")
################################################################################
#
# Run the same test from the "a" directory.
#
################################################################################
tester.run_build_system(subdir="a")
tester.expect_addition("bin/$toolset/debug/test.obj")
tester.expect_addition("a/bin/$toolset/debug/test1.exe")
tester.rm("bin")
tester.rm("a/bin")
################################################################################
#
# This time, do give the project an id.
#
################################################################################
tester.write("jamroot.jam", """
lib test : test.cpp ;
project test_project : requirements <library>test ;
build-project a ;
""")
tester.run_build_system()
tester.expect_addition("bin/$toolset/debug/test.obj")
tester.expect_addition("a/bin/$toolset/debug/test1.exe")
tester.rm("bin")
tester.rm("a/bin")
################################################################################
#
# Now, give the project an id in its attributes.
#
################################################################################
tester.write("jamroot.jam", """
lib test : test.cpp ;
project : id test_project : requirements <library>test ;
build-project a ;
""")
tester.run_build_system()
tester.expect_addition("bin/$toolset/debug/test.obj")
tester.expect_addition("a/bin/$toolset/debug/test1.exe")
tester.rm("bin")
tester.rm("a/bin")
################################################################################
#
# Give the project an id in both ways at once.
#
################################################################################
# A project-level <library> requirement: the library is built at the top level
# and linked into the executable built by subproject 'a'.
tester.write("jamroot.jam", """
lib test : test.cpp ;
project test_project1 : id test_project : requirements <library>test ;
build-project a ;
""")
tester.run_build_system()
tester.expect_addition("bin/$toolset/debug/test.obj")
tester.expect_addition("a/bin/$toolset/debug/test1.exe")
tester.rm("bin")
tester.rm("a/bin")
################################################################################
#
# Test an absolute path in native format.
#
################################################################################
tester.write("jamroot.jam", """
import path ;
path-constant here : . ;
current-location = [ path.native [ path.root [ path.make $(here) ] [ path.pwd ]
] ] ;
project test : requirements <source>$(current-location)/a/test1.cpp ;
exe test : test.cpp ;
""")
tester.run_build_system()
tester.expect_addition("bin/$toolset/debug/test.exe")
tester.rm("bin")
tester.rm("a/bin")
################################################################################
#
# Test an absolute path in canonical format.
#
################################################################################
tester.write("jamroot.jam", """
import path ;
path-constant here : . ;
current-location = [ path.root [ path.make $(here) ] [ path.pwd ] ] ;
project test : requirements <source>$(current-location)/a/test1.cpp ;
exe test : test.cpp ;
""")
tester.run_build_system()
tester.expect_addition("bin/$toolset/debug/test.exe")
tester.rm("bin")
tester.rm("a/bin")
################################################################################
#
# Test dependency properties (e.g. <source>) whose targets are specified using a
# relative path.
#
################################################################################
# Use jamroot.jam rather than jamfile.jam to avoid inheriting the <source> from
# the parent as that would make test3 a source of itself.
tester.write("b/jamroot.jam", """
obj test3 : test3.cpp ;
""")
tester.write("b/test3.cpp", """
void bar() {}
""")
tester.write("jamroot.jam", """
project test : requirements <source>b//test3 ;
build-project a ;
""")
tester.write("a/jamfile.jam", """
exe test : test1.cpp ;
""")
tester.write("a/test1.cpp", """
void bar();
int main() { bar(); }
""")
tester.run_build_system()
tester.expect_addition("b/bin/$toolset/debug/test3.obj")
tester.expect_addition("a/bin/$toolset/debug/test.exe")
# Clean the whole workspace before the final test case.
tester.rm("bin")
tester.rm("a")
tester.rm("jamroot.jam")
tester.rm("test.cpp")
################################################################################
#
# Test that source-location is respected.
#
################################################################################
tester.write("build/jamroot.jam", """
project : requirements <source>test.cpp : source-location ../src ;
""")
tester.write("src/test.cpp", """
int main() {}
""")
tester.write("build/a/jamfile.jam", """
project : source-location ../../a_src ;
exe test : test1.cpp ;
""")
tester.write("a_src/test1.cpp", """
""")
tester.run_build_system(subdir="build/a")
tester.expect_addition("build/a/bin/$toolset/debug/test.exe")
tester.cleanup()
// This file is part of ICU4X. For terms of use, please see the file
// called LICENSE at the top level of the ICU4X source tree
// (online at: https://github.com/unicode-org/icu4x/blob/main/LICENSE ).

//! Parsing, manipulating, and serializing Unicode Language and Locale Identifiers.
//!
//! This module is published as its own crate ([`icu_locale_core`](https://docs.rs/icu_locale_core/latest/icu_locale_core/))
//! and as part of the [`icu`](https://docs.rs/icu/latest/icu/) crate. See the latter for more details on the ICU4X project.
//!
//! The module provides algorithms for parsing a string into a well-formed language or locale identifier
//! as defined by [`UTS #35: Unicode LDML 3. Unicode Language and Locale Identifiers`]. Additionally
//! the module provides a [`preferences`] interface for operations on locale preferences and conversions
//! from and to locale unicode extensions.
//!
//! [`Locale`] is the most common structure to use for storing information about a language,
//! script, region, variants and extensions. In almost all cases, this struct should be used as the
//! base unit for all locale management operations.
//!
//! [`LanguageIdentifier`] is a strict subset of [`Locale`] which can be useful in a narrow range of
//! cases where [`Unicode Extensions`] are not relevant.
//!
//! If in doubt, use [`Locale`].
//!
//! # Examples
//!
//! ```
//! use icu::locale::Locale;
//! use icu::locale::{
//!     locale,
//!     subtags::{language, region},
//! };
//!
//! let mut loc: Locale = locale!("en-US");
//!
//! assert_eq!(loc.id.language, language!("en"));
//! assert_eq!(loc.id.script, None);
//! assert_eq!(loc.id.region, Some(region!("US")));
//! assert_eq!(loc.id.variants.len(), 0);
//!
//! loc.id.region = Some(region!("GB"));
//!
//! assert_eq!(loc, locale!("en-GB"));
//! ```
//!
//! For more details, see [`Locale`] and [`LanguageIdentifier`].
//!
//! [`UTS #35: Unicode LDML 3. Unicode Language and Locale Identifiers`]: https://unicode.org/reports/tr35/tr35.html#Unicode_Language_and_Locale_Identifiers
//! [`ICU4X`]: ../icu/index.html
//! [`Unicode Extensions`]: extensions

// https://github.com/unicode-org/icu4x/blob/main/documents/process/boilerplate.md#library-annotations
#![cfg_attr(not(any(test, doc)), no_std)]
#![cfg_attr(
    not(test),
    deny(
        clippy::indexing_slicing,
        clippy::unwrap_used,
        clippy::expect_used,
        clippy::panic,
        clippy::exhaustive_structs,
        clippy::exhaustive_enums,
        clippy::trivially_copy_pass_by_ref,
        missing_debug_implementations,
    )
)]
#![warn(missing_docs)]

#[cfg(feature = "alloc")]
extern crate alloc;

// Private implementation modules.
#[macro_use]
mod helpers;
mod data;
mod langid;
mod locale;
mod macros;
mod parser;
mod shortvec;

// Core public types re-exported at the crate root.
pub use data::DataLocale;
pub use langid::LanguageIdentifier;
pub use locale::Locale;
pub use parser::ParseError;

// Public submodules.
pub mod extensions;
#[macro_use]
pub mod subtags;
pub mod preferences;
pub mod zerovec;

// Optional serialization support, gated behind cargo features.
#[cfg(feature = "serde")]
mod serde;
#[cfg(feature = "databake")]
mod databake;
#define TORCH_ASSERT_ONLY_METHOD_OPERATORS
#include <ATen/core/Tensor.h>
#include <ATen/Dispatch.h>
#include <ATen/native/Histogram.h>
#include <ATen/native/Resize.h>
#ifndef AT_PER_OPERATOR_HEADERS
#include <ATen/Functions.h>
#include <ATen/NativeFunctions.h>
#else
#include <ATen/ops/_histogramdd_bin_edges.h>
#include <ATen/ops/_histogramdd_bin_edges_native.h>
#include <ATen/ops/_histogramdd_from_bin_cts.h>
#include <ATen/ops/_histogramdd_from_bin_cts_native.h>
#include <ATen/ops/_histogramdd_from_bin_tensors.h>
#include <ATen/ops/_histogramdd_from_bin_tensors_native.h>
#include <ATen/ops/aminmax.h>
#include <ATen/ops/empty.h>
#include <ATen/ops/histc_native.h>
#include <ATen/ops/histogram_native.h>
#include <ATen/ops/histogramdd_native.h>
#include <ATen/ops/linspace.h>
#endif
#include <cmath>
#include <numeric>
#include <tuple>
#include <vector>
#include <functional>
#include <c10/util/ArrayRef.h>
#include <c10/core/ScalarType.h>
#include <c10/core/DefaultDtype.h>
#include <c10/util/irange.h>
/* Implements a numpy-like histogramdd function running on cpu
* https://numpy.org/doc/stable/reference/generated/numpy.histogramdd.html
*
* See the docstr for torch.histogramdd in torch/functional.py for further explanation.
*
* - torch.histogramdd(input, bins, range=None, weight=None, density=False)
* input - tensor with shape (M, N). input is interpreted as M coordinates in N-dimensional space.
* If a tensor with more than 2 dimensions is passed, all but the last dimension will be flattened.
* bins - int[] of length N or tensor list of length N. If int[], defines the number of equal-width bins
* in each dimension. If tensor list, defines the sequences of bin edges, including rightmost edges,
* for each dimension.
* range - float[] of length 2 * N, optional. If specified, defines the leftmost and rightmost bin edges
* for each dimension.
* weight - tensor, optional. If provided, weight should have the same shape as input excluding its last dimension.
* Each N-dimensional value in input contributes its associated weight towards its bin's result.
* If weight is not specified, each value has weight 1 by default.
* density - bool, optional. If false (default), the result will contain the total count (weight) in each bin.
* If True, each count (weight) is divided by the total count (total weight), then divided by the
* volume of its associated bin.
*
* Returns:
* hist - N-dimensional tensor containing the values of the histogram.
* bin_edges - tensor list of length N containing the edges of the histogram bins in each dimension.
* Bins include their left edge and exclude their right edge, with the exception of the
* rightmost bin in each dimension which includes both of its edges.
*
* Restrictions are defined in histogram_check_inputs() and in select_outer_bin_edges().
*/
namespace at::native {
// Kernel entry points; concrete implementations are registered per device type
// via the dispatch mechanism.
DEFINE_DISPATCH(histogramdd_stub);
DEFINE_DISPATCH(histogramdd_linear_stub);
DEFINE_DISPATCH(histogram_select_outer_bin_edges_stub);
namespace {
/* Checks properties of input tensors input, bins, and weight.
 */
void histogramdd_check_inputs(const Tensor& input, const TensorList& bins, const std::optional<Tensor>& weight) {
  TORCH_CHECK(input.dim() >= 2, "torch.histogramdd: input tensor should have at least 2 dimensions, but got ",
      input.dim());

  const int64_t N = input.size(-1);

  // One sequence of bin edges is required per coordinate dimension.
  TORCH_CHECK(static_cast<int64_t>(bins.size()) == N, "torch.histogramdd: expected ", N, " sequences of bin edges for a ", N,
      "-dimensional histogram but got ", bins.size());

  auto input_dtype = input.dtype();
  for (const auto dim : c10::irange(N)) {
    const Tensor& dim_bins = bins[dim];

    auto bins_dtype = dim_bins.dtype();
    TORCH_CHECK(input_dtype == bins_dtype, "torch.histogramdd: input tensor and bins tensors should",
        " have the same dtype, but got input with dtype ", input_dtype,
        " and bins for dimension ", dim, " with dtype ", bins_dtype);

    // Each bin-edges tensor must be a non-empty 1-D sequence.
    const int64_t dim_bins_dim = dim_bins.dim();
    TORCH_CHECK(dim_bins_dim == 1, "torch.histogramdd: bins tensor should have one dimension,",
        " but got ", dim_bins_dim, " dimensions in the bins tensor for dimension ", dim);

    const int64_t numel = dim_bins.numel();
    TORCH_CHECK(numel > 0, "torch.histogramdd: bins tensor should have at least 1 element,",
        " but got ", numel, " elements in the bins tensor for dimension ", dim);
  }

  if (weight.has_value()) {
    TORCH_CHECK(input.dtype() == weight.value().dtype(), "torch.histogramdd: if weight tensor is provided,"
        " input tensor and weight tensor should have the same dtype, but got input(", input.dtype(), ")",
        ", and weight(", weight.value().dtype(), ")");

    /* If a weight tensor is provided, we expect its shape to match that of
     * the input tensor excluding its innermost dimension N.
     */
    auto input_sizes = input.sizes().vec();
    input_sizes.pop_back();

    auto weight_sizes = weight.value().sizes().vec();
    if (weight_sizes.empty()) {
      // correctly handle scalars
      weight_sizes = {1};
    }

    TORCH_CHECK(input_sizes == weight_sizes, "torch.histogramdd: if weight tensor is provided it should have"
        " the same shape as the input tensor excluding its innermost dimension, but got input with shape ",
        input.sizes(), " and weight with shape ", weight.value().sizes());
  }
}
/* Checks properties of output tensors hist and bin_edges, then resizes them.
 */
void histogramdd_prepare_out(const Tensor& input, const std::vector<int64_t>& bin_ct,
    const Tensor& hist, const TensorList& bin_edges) {
  const int64_t N = input.size(-1);

  TORCH_INTERNAL_ASSERT((int64_t)bin_ct.size() == N);
  TORCH_INTERNAL_ASSERT((int64_t)bin_edges.size() == N);

  TORCH_CHECK(input.dtype() == hist.dtype(), "torch.histogram: input tensor and hist tensor should",
      " have the same dtype, but got input ", input.dtype(), " and hist ", hist.dtype());

  for (const auto dim : c10::irange(N)) {
    TORCH_CHECK(input.dtype() == bin_edges[dim].dtype(), "torch.histogram: input tensor and bin_edges tensor should",
        " have the same dtype, but got input ", input.dtype(), " and bin_edges ", bin_edges[dim].dtype(),
        " for dimension ", dim);

    TORCH_CHECK(bin_ct[dim] > 0,
        "torch.histogram(): bins must be > 0, but got ", bin_ct[dim], " for dimension ", dim);

    // bin_ct[dim] bins require bin_ct[dim] + 1 edge values in that dimension.
    at::native::resize_output(bin_edges[dim], bin_ct[dim] + 1);
  }

  // hist holds one count (weight) per N-dimensional bin.
  at::native::resize_output(hist, bin_ct);
}
void histogramdd_prepare_out(const Tensor& input, TensorList bins,
const Tensor& hist, const TensorList& bin_edges) {
std::vector<int64_t> bin_ct(bins.size());
std::transform(bins.begin(), bins.end(), bin_ct.begin(), [](Tensor t) { return t.numel() - 1; });
histogramdd_prepare_out(input, bin_ct, hist, bin_edges);
}
/* Determines the outermost bin edges. For simplicity when calling into aminmax,
 * assumes that input has already been reshaped to (M, N).
 */
std::pair<std::vector<double>, std::vector<double>>
select_outer_bin_edges(const Tensor& input, std::optional<c10::ArrayRef<double>> range) {
  TORCH_INTERNAL_ASSERT(input.dim() == 2, "expected input to have shape (M, N)");
  const int64_t N = input.size(-1);

  // Default ranges for empty input matching numpy.histogram's default
  std::vector<double> leftmost_edges(N, 0.);
  std::vector<double> rightmost_edges(N, 1.);

  if (range.has_value()) {
    // range is specified: interpreted as (min_0, max_0, min_1, max_1, ...)
    TORCH_CHECK((int64_t)range.value().size() == 2 * N, "torch.histogramdd: for a ", N, "-dimensional histogram",
        " range should have ", 2 * N, " elements, but got ", range.value().size());

    for (const auto dim : c10::irange(N)) {
      leftmost_edges[dim] = range.value()[2 * dim];
      rightmost_edges[dim] = range.value()[2 * dim + 1];
    }
  } else if (input.numel() > 0) {
    // non-empty input: per-device kernel computes the min/max of each dimension
    histogram_select_outer_bin_edges_stub(input.device().type(), input, N, leftmost_edges, rightmost_edges);
  }

  for (const auto dim : c10::irange(N)) {
    double leftmost_edge = leftmost_edges[dim];
    double rightmost_edge = rightmost_edges[dim];

    TORCH_CHECK(std::isfinite(leftmost_edge) && std::isfinite(rightmost_edge),
        "torch.histogramdd: dimension ", dim, "'s range [",
        leftmost_edge, ", ", rightmost_edge, "] is not finite");

    TORCH_CHECK(leftmost_edge <= rightmost_edge, "torch.histogramdd: min should not exceed max, but got",
        " min ", leftmost_edge, " max ", rightmost_edge, " for dimension ", dim);

    // Expand empty range to match numpy behavior and avoid division by 0 in normalization
    if (leftmost_edge == rightmost_edge) {
      leftmost_edges[dim] -= 0.5;
      rightmost_edges[dim] += 0.5;
    }
  }

  return std::make_pair(leftmost_edges, rightmost_edges);
}
/* Bin edges correction based on the precision representation.
 * To maintain the backward compatibility we take max(std::nextafter<>, +1)
 * and min(std::nextafter<>, -1) for scalar types. For other types +/- 1 as usual.
 */
void bins_edges_correction(const ScalarType& t, double &leftmost_edge, double &rightmost_edge)
{
// Widens the degenerate range by at least one representable step of real_type
// in each direction, but never by less than 1.0 (backward compatibility).
#define UPDATE_WITH_LIMIT(real_type, scalartype)         \
  case ScalarType::scalartype:                           \
    leftmost_edge = std::min(                            \
        static_cast<double>(                             \
            std::nexttoward(                             \
                static_cast<real_type>(leftmost_edge),   \
                std::numeric_limits<real_type>::lowest() \
            )                                            \
        ),                                               \
        leftmost_edge - 1.                               \
    );                                                   \
    rightmost_edge = std::max(                           \
        static_cast<double>(                             \
            std::nexttoward(                             \
                static_cast<real_type>(rightmost_edge),  \
                std::numeric_limits<real_type>::max()    \
            )                                            \
        ),                                               \
        rightmost_edge + 1.                              \
    );                                                   \
    break;

  switch (t) {
    UPDATE_WITH_LIMIT(double, Double)
    UPDATE_WITH_LIMIT(float, Float)
    default:
      // Fallback to the default behavior for other types
      leftmost_edge -= 1;
      rightmost_edge += 1;
  }
#undef UPDATE_WITH_LIMIT
}
/* histc's version of the logic for outermost bin edges.
 */
std::pair<double, double> histc_select_outer_bin_edges(const Tensor& input,
    const Scalar& min, const Scalar& max) {
  double leftmost_edge = min.to<double>();
  double rightmost_edge = max.to<double>();

  // min == max requests that the range be derived from the data itself.
  if (leftmost_edge == rightmost_edge && input.numel() > 0) {
    auto extrema = aminmax(input);
    leftmost_edge = std::get<0>(extrema).item<double>();
    rightmost_edge = std::get<1>(extrema).item<double>();
  }

  // Still degenerate (empty input, or all values equal): widen the range so
  // the bins are well-defined.
  if (leftmost_edge == rightmost_edge) {
    bins_edges_correction(input.dtype().toScalarType(), leftmost_edge, rightmost_edge);
  }

  TORCH_CHECK(!(std::isinf(leftmost_edge) || std::isinf(rightmost_edge) ||
      std::isnan(leftmost_edge) || std::isnan(rightmost_edge)),
      "torch.histc: range of [", leftmost_edge, ", ", rightmost_edge, "] is not finite");

  TORCH_CHECK(leftmost_edge < rightmost_edge, "torch.histc: max must be larger than min");

  return std::make_pair(leftmost_edge, rightmost_edge);
}
} // namespace
// Allocates one empty, contiguous bin-edges tensor (matching self's options)
// per coordinate dimension of self; the caller resizes and fills them later.
static std::vector<Tensor> allocate_bin_edges_tensors(const Tensor& self) {
  TORCH_CHECK(self.dim() >= 2, "torch.histogramdd: input tensor should have at least 2 dimensions");
  const int64_t num_dims = self.size(-1);
  std::vector<Tensor> edges(num_dims);
  for (const auto d : c10::irange(num_dims)) {
    edges[d] = at::empty({0}, self.options(), MemoryFormat::Contiguous);
  }
  return edges;
}
/* histogramdd variant taking explicit bin-edge tensors: validates the inputs,
 * copies the provided edges into the outputs, and invokes the general kernel.
 */
static Tensor& histogramdd_out(const Tensor& self, TensorList bins,
    const std::optional<Tensor>& weight, bool density,
    Tensor& hist, TensorList& bin_edges) {
  histogramdd_check_inputs(self, bins, weight);
  histogramdd_prepare_out(self, bins, hist, bin_edges);

  for (const auto i : c10::irange(bins.size())) {
    bin_edges[i].copy_(bins[i]);
  }

  histogramdd_stub(self.device().type(), self, weight, density, hist, bin_edges);
  return hist;
}
// Out-of-place entry point: allocates the histogram and per-dimension edge
// tensors, then delegates to the out-variant above.
Tensor _histogramdd(const Tensor& self, TensorList bins,
    const std::optional<Tensor>& weight, bool density) {
  std::vector<Tensor> edges = allocate_bin_edges_tensors(self);
  TensorList edges_view(edges);
  Tensor result = at::empty({0}, self.options(), MemoryFormat::Contiguous);
  histogramdd_out(self, bins, weight, density, result, edges_view);
  return result;
}
/* Versions of histogramdd in which bins is an int[]
 * defining the number of bins in each dimension.
 *
 * Computes the per-dimension bin edges (bin_ct[dim] + 1 evenly-spaced values
 * each) into bin_edges_out and returns it. The edge range comes from the
 * explicit `range` argument when given, otherwise from the data itself.
 */
static std::vector<Tensor>& histogramdd_bin_edges_out(const Tensor& self, IntArrayRef bin_ct,
    std::optional<c10::ArrayRef<double>> range,
    const std::optional<Tensor>& weight, bool density,
    std::vector<Tensor>& bin_edges_out) {
  const int64_t N = self.size(-1);
  // Flatten all leading dimensions: the input is interpreted as M points in
  // N-dimensional space.
  const int64_t M = std::accumulate(self.sizes().begin(), self.sizes().end() - 1,
      static_cast<int64_t>(1), std::multiplies<int64_t>());
  Tensor reshaped_self = self.reshape({ M, N });

  auto outer_bin_edges = select_outer_bin_edges(reshaped_self, range);

  const int64_t bin_size = bin_ct.size();
  TORCH_CHECK(
      N == bin_size,
      "histogramdd: The size of bins must be equal to the innermost dimension of the input.");

  for (const auto dim : c10::irange(N)) {
    at::linspace_out(bin_edges_out[dim], outer_bin_edges.first[dim], outer_bin_edges.second[dim],
        bin_ct[dim] + 1);
  }

  // NOTE: an unused `TensorList bin_edges_out_tl(bin_edges_out);` local was
  // removed; the function operates on bin_edges_out directly.
  return bin_edges_out;
}
// Allocating wrapper: creates fresh edge tensors and fills them in-place.
std::vector<Tensor> histogramdd_bin_edges(const Tensor& self, IntArrayRef bin_ct,
    std::optional<c10::ArrayRef<double>> range,
    const std::optional<Tensor>& weight, bool density) {
  std::vector<Tensor> edges = allocate_bin_edges_tensors(self);
  return histogramdd_bin_edges_out(self, bin_ct, range, weight, density, edges);
}
// histogramdd variant taking per-dimension bin counts: computes evenly-spaced
// edges first, then validates and invokes the linear (uniform-bin) kernel.
static Tensor& histogramdd_out(const Tensor& self, IntArrayRef bin_ct,
    std::optional<c10::ArrayRef<double>> range,
    const std::optional<Tensor>& weight, bool density,
    Tensor& hist, TensorList& bin_edges) {
  std::vector<Tensor> computed_edges = histogramdd_bin_edges(self, bin_ct, range, weight, density);

  histogramdd_check_inputs(self, computed_edges, weight);
  histogramdd_prepare_out(self, computed_edges, hist, bin_edges);

  for (const auto i : c10::irange(computed_edges.size())) {
    bin_edges[i].copy_(computed_edges[i]);
  }

  histogramdd_linear_stub(self.device().type(), self, weight, density, hist, bin_edges, true);
  return hist;
}
// Out-of-place entry point for the bin-count form.
Tensor _histogramdd(const Tensor& self, IntArrayRef bin_ct,
    std::optional<c10::ArrayRef<double>> range,
    const std::optional<Tensor>& weight, bool density) {
  std::vector<Tensor> edges = allocate_bin_edges_tensors(self);
  TensorList edges_view(edges);
  Tensor result = at::empty({0}, self.options(), MemoryFormat::Contiguous);
  histogramdd_out(self, bin_ct, range, weight, density, result, edges_view);
  return result;
}
/* 1-D histogram with explicit bin edges: implemented by viewing the input as a
 * set of 1-dimensional coordinates and reusing the histogramdd machinery.
 */
std::tuple<Tensor&, Tensor&>
histogram_out(const Tensor& self, const Tensor& bins,
    const std::optional<Tensor>& weight, bool density,
    Tensor& hist, Tensor& bin_edges) {
  Tensor flat_input = self.reshape({ self.numel(), 1 });
  std::optional<Tensor> flat_weight = weight;
  if (flat_weight.has_value()) {
    flat_weight = flat_weight.value().reshape({ flat_weight.value().numel() });
  }
  TensorList edge_seqs = bins;
  TensorList edge_outs = bin_edges;

  histogramdd_out(flat_input, edge_seqs, flat_weight, density, hist, edge_outs);

  return std::forward_as_tuple(hist, bin_edges);
}
// Allocating wrapper for the explicit-edges form of torch.histogram.
std::tuple<Tensor, Tensor>
histogram(const Tensor& self, const Tensor& bins,
    const std::optional<Tensor>& weight, bool density) {
  Tensor counts = at::empty({0}, self.options(), MemoryFormat::Contiguous);
  Tensor edges = at::empty({0}, bins.options(), MemoryFormat::Contiguous);
  return histogram_out(self, bins, weight, density, counts, edges);
}
/* Versions of histogram in which bins is an integer specifying the number of equal-width bins.
 */
std::tuple<Tensor&, Tensor&>
histogram_out(const Tensor& self, int64_t bin_ct, std::optional<c10::ArrayRef<double>> range,
    const std::optional<Tensor>& weight, bool density,
    Tensor& hist, Tensor& bin_edges) {
  // View the input as 1-dimensional coordinates so histogramdd helpers apply.
  Tensor reshaped_self = self.reshape({ self.numel(), 1 });
  std::optional<Tensor> reshaped_weight = weight.has_value()
      ? weight.value().reshape({ weight.value().numel() }) : weight;
  TensorList bins_in = bin_edges;
  TensorList bins_out = bin_edges;

  // Resize outputs first so linspace_out below writes into a tensor of the
  // correct length (bin_ct + 1 edges).
  histogramdd_prepare_out(reshaped_self, std::vector<int64_t>{bin_ct}, hist, bins_out);

  auto outer_bin_edges = select_outer_bin_edges(reshaped_self, range);
  at::linspace_out(bin_edges, outer_bin_edges.first[0], outer_bin_edges.second[0], bin_ct + 1);

  histogramdd_check_inputs(reshaped_self, bins_in, reshaped_weight);

  histogramdd_linear_stub(reshaped_self.device().type(), reshaped_self, reshaped_weight, density, hist, bin_edges, true);
  return std::forward_as_tuple(hist, bin_edges);
}
// Allocating wrapper for the bin-count form of torch.histogram.
std::tuple<Tensor, Tensor>
histogram(const Tensor& self, int64_t bin_ct, std::optional<c10::ArrayRef<double>> range,
    const std::optional<Tensor>& weight, bool density) {
  Tensor hist = at::empty({0}, self.options(), MemoryFormat::Contiguous);
  // Request contiguous output explicitly, for consistency with the sibling
  // overloads (at::empty defaults to contiguous, so behavior is unchanged).
  Tensor bin_edges_out = at::empty({0}, self.options(), MemoryFormat::Contiguous);
  return histogram_out(self, bin_ct, range, weight, density, hist, bin_edges_out);
}
/* Narrowed interface for the legacy torch.histc function.
 */
Tensor& histogram_histc_out(const Tensor& self, int64_t bin_ct,
    const Scalar& min, const Scalar& max, Tensor& hist) {
  // histc exposes no bin_edges output; allocate a scratch tensor for them.
  Tensor bin_edges = at::empty({0}, self.options());

  Tensor reshaped = self.reshape({ self.numel(), 1 });
  TensorList bins_in = bin_edges;
  TensorList bins_out = bin_edges;

  // Resize hist and bin_edges before writing the edge values below.
  histogramdd_prepare_out(reshaped, std::vector<int64_t>{bin_ct}, hist, bins_out);

  auto outer_bin_edges = histc_select_outer_bin_edges(self, min, max);
  at::linspace_out(bin_edges, outer_bin_edges.first, outer_bin_edges.second, bin_ct + 1);

  histogramdd_check_inputs(reshaped, bins_in, {});

  // NOTE(review): the trailing bool is true in the torch.histogram call sites
  // above but false here — presumably it selects histc's edge/assignment
  // semantics in the kernel; confirm against histogramdd_linear_stub's impl.
  histogramdd_linear_stub(reshaped.device().type(), reshaped,
      std::optional<Tensor>(), false, hist, bin_edges, false);
  return hist;
}
// Allocating wrapper for the legacy torch.histc entry point.
Tensor histogram_histc(const Tensor& self, int64_t bin_ct,
    const Scalar& min, const Scalar& max) {
  Tensor counts = at::empty({0}, self.options(), MemoryFormat::Contiguous);
  return histogram_histc_out(self, bin_ct, min, max, counts);
}
// Public histogramdd overload taking explicit bin-edge tensors. The range
// argument is ignored: the edges fully determine the binning.
std::tuple<Tensor, std::vector<Tensor>> histogramdd(
    const Tensor &self, TensorList bins, std::optional<ArrayRef<double>> /*range*/,
    const std::optional<Tensor> &weight, bool density) {
  Tensor counts = at::_histogramdd_from_bin_tensors(self, bins, weight, density);
  return std::make_tuple(std::move(counts), bins.vec());
}
// Public histogramdd overload taking per-dimension bin counts: first computes
// the edge tensors, then the counts.
std::tuple<Tensor, std::vector<Tensor>> histogramdd(
    const Tensor &self, IntArrayRef bins, std::optional<ArrayRef<double>> range,
    const std::optional<Tensor> &weight, bool density) {
  std::vector<Tensor> edges = at::_histogramdd_bin_edges(self, bins, range, weight, density);
  Tensor counts = at::_histogramdd_from_bin_cts(self, bins, range, weight, density);
  return std::make_tuple(std::move(counts), std::move(edges));
}
// Public histogramdd overload taking a single bin count: broadcasts it to
// every coordinate dimension and defers to the per-dimension-count overload.
std::tuple<Tensor, std::vector<Tensor>> histogramdd(
    const Tensor &self, int64_t bins, std::optional<ArrayRef<double>> range,
    const std::optional<Tensor> &weight, bool density) {
  DimVector per_dim_bins(self.size(-1), bins);
  return at::native::histogramdd(self, per_dim_bins, range, weight, density);
}
} // namespace at::native | cpp | github | https://github.com/pytorch/pytorch | aten/src/ATen/native/Histogram.cpp |
# -*- coding: utf-8 -*-
"""
***************************************************************************
EditScriptAction.py
---------------------
Date : August 2012
Copyright : (C) 2012 by Victor Olaya
Email : volayaf at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Victor Olaya'
__date__ = 'August 2012'
__copyright__ = '(C) 2012, Victor Olaya'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
from processing.gui.ContextAction import ContextAction
from processing.gui.ScriptEditorDialog import ScriptEditorDialog
from processing.algs.r.RAlgorithm import RAlgorithm
from processing.script.ScriptAlgorithm import ScriptAlgorithm
class EditScriptAction(ContextAction):
    """Context-menu action that opens the script editor for an algorithm.

    Supports both Python script algorithms and R algorithms; the script type
    is selected at construction time using ScriptEditorDialog's constants.
    """

    # Kept for backward compatibility; comparisons below use the matching
    # constants on ScriptEditorDialog.
    SCRIPT_PYTHON = 0
    SCRIPT_R = 1

    def __init__(self, scriptType):
        self.name = self.tr('Edit script', 'EditScriptAction')
        self.scriptType = scriptType

    def isEnabled(self):
        """Return True if the current algorithm can be edited as a script."""
        if self.scriptType == ScriptEditorDialog.SCRIPT_PYTHON:
            return isinstance(self.alg, ScriptAlgorithm) and self.alg.allowEdit
        elif self.scriptType == ScriptEditorDialog.SCRIPT_R:
            return isinstance(self.alg, RAlgorithm)
        # Previously fell through and implicitly returned None for unknown
        # script types; return an explicit bool instead.
        return False

    def execute(self):
        """Open the editor dialog and refresh the provider if it saved changes."""
        dlg = ScriptEditorDialog(self.scriptType, self.alg)
        dlg.show()
        dlg.exec_()
        if dlg.update:
            if self.scriptType == ScriptEditorDialog.SCRIPT_PYTHON:
                self.toolbox.updateProvider('script')
            elif self.scriptType == ScriptEditorDialog.SCRIPT_R:
                self.toolbox.updateProvider('r')
# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
%YAML 1.2
---
$id: http://devicetree.org/schemas/display/solomon,ssd1307fb.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#
title: Solomon SSD1307 OLED Controller Framebuffer
maintainers:
- Maxime Ripard <mripard@kernel.org>
- Javier Martinez Canillas <javierm@redhat.com>
properties:
compatible:
oneOf:
# Deprecated compatible strings
- enum:
- solomon,ssd1305fb-i2c
- solomon,ssd1306fb-i2c
- solomon,ssd1307fb-i2c
- solomon,ssd1309fb-i2c
deprecated: true
- enum:
- sinowealth,sh1106
- solomon,ssd1305
- solomon,ssd1306
- solomon,ssd1307
- solomon,ssd1309
pwms:
maxItems: 1
vbat-supply:
description: The supply for VBAT
solomon,page-offset:
$ref: /schemas/types.yaml#/definitions/uint32
default: 1
description:
Offset of pages (band of 8 pixels) that the screen is mapped to
solomon,segment-no-remap:
type: boolean
description:
Display needs normal (non-inverted) data column to segment mapping
solomon,col-offset:
$ref: /schemas/types.yaml#/definitions/uint32
default: 0
description:
Offset of columns (COL/SEG) that the screen is mapped to
solomon,com-seq:
type: boolean
description:
Display uses sequential COM pin configuration
solomon,com-lrremap:
type: boolean
description:
Display uses left-right COM pin remap
solomon,com-invdir:
type: boolean
description:
Display uses inverted COM pin scan direction
solomon,com-offset:
$ref: /schemas/types.yaml#/definitions/uint32
default: 0
description:
Number of the COM pin wired to the first display line
solomon,prechargep1:
$ref: /schemas/types.yaml#/definitions/uint32
default: 2
description:
Length of deselect period (phase 1) in clock cycles
solomon,prechargep2:
$ref: /schemas/types.yaml#/definitions/uint32
default: 2
description:
      Length of precharge period (phase 2) in clock cycles. The higher the
      capacitance of the OLED's pixels, the higher this value needs to be.
solomon,dclk-div:
$ref: /schemas/types.yaml#/definitions/uint32
minimum: 1
maximum: 16
description:
Clock divisor. The default value is controller-dependent.
solomon,dclk-frq:
$ref: /schemas/types.yaml#/definitions/uint32
minimum: 0
maximum: 15
description:
Clock frequency, higher value means higher frequency.
The default value is controller-dependent.
solomon,lookup-table:
$ref: /schemas/types.yaml#/definitions/uint8-array
maxItems: 4
description:
8 bit value array of current drive pulse widths for BANK0, and colors A,
B, and C. Each value in range of 31 to 63 for pulse widths of 32 to 64.
Color D is always width 64.
solomon,area-color-enable:
type: boolean
description:
Display uses color mode
solomon,low-power:
type: boolean
description:
Display runs in low power mode
required:
- compatible
- reg
allOf:
- $ref: solomon,ssd-common.yaml#
- if:
properties:
compatible:
contains:
const: sinowealth,sh1106
then:
properties:
solomon,width:
default: 132
solomon,height:
default: 64
solomon,dclk-div:
default: 1
solomon,dclk-frq:
default: 5
- if:
properties:
compatible:
contains:
enum:
- solomon,ssd1305-i2c
- solomon,ssd1305
then:
properties:
solomon,width:
default: 132
solomon,height:
default: 64
solomon,dclk-div:
default: 1
solomon,dclk-frq:
default: 7
- if:
properties:
compatible:
contains:
enum:
- solomon,ssd1306-i2c
- solomon,ssd1306
then:
properties:
solomon,width:
default: 128
solomon,height:
default: 64
solomon,dclk-div:
default: 1
solomon,dclk-frq:
default: 8
- if:
properties:
compatible:
contains:
enum:
- solomon,ssd1307-i2c
- solomon,ssd1307
then:
properties:
solomon,width:
default: 128
solomon,height:
default: 39
solomon,dclk-div:
default: 2
solomon,dclk-frq:
default: 12
required:
- pwms
- if:
properties:
compatible:
contains:
enum:
- solomon,ssd1309-i2c
- solomon,ssd1309
then:
properties:
solomon,width:
default: 128
solomon,height:
default: 64
solomon,dclk-div:
default: 1
solomon,dclk-frq:
default: 10
unevaluatedProperties: false
examples:
- |
i2c {
#address-cells = <1>;
#size-cells = <0>;
ssd1307_i2c: oled@3c {
compatible = "solomon,ssd1307";
reg = <0x3c>;
pwms = <&pwm 4 3000>;
reset-gpios = <&gpio2 7>;
};
ssd1306_i2c: oled@3d {
compatible = "solomon,ssd1306";
reg = <0x3d>;
pwms = <&pwm 4 3000>;
reset-gpios = <&gpio2 7>;
solomon,com-lrremap;
solomon,com-invdir;
solomon,com-offset = <32>;
solomon,lookup-table = /bits/ 8 <0x3f 0x3f 0x3f 0x3f>;
};
};
- |
spi {
#address-cells = <1>;
#size-cells = <0>;
ssd1307_spi: oled@0 {
compatible = "solomon,ssd1307";
reg = <0x0>;
pwms = <&pwm 4 3000>;
reset-gpios = <&gpio2 7>;
dc-gpios = <&gpio2 8>;
spi-max-frequency = <10000000>;
};
ssd1306_spi: oled@1 {
compatible = "solomon,ssd1306";
reg = <0x1>;
pwms = <&pwm 4 3000>;
reset-gpios = <&gpio2 7>;
dc-gpios = <&gpio2 8>;
spi-max-frequency = <10000000>;
solomon,com-lrremap;
solomon,com-invdir;
solomon,com-offset = <32>;
solomon,lookup-table = /bits/ 8 <0x3f 0x3f 0x3f 0x3f>;
};
}; | unknown | github | https://github.com/torvalds/linux | Documentation/devicetree/bindings/display/solomon,ssd1307fb.yaml |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Handles control flow statements: while, for, if."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import gast
from tensorflow.contrib.autograph.pyct import anno
from tensorflow.contrib.autograph.pyct import ast_util
from tensorflow.contrib.autograph.pyct import parser
from tensorflow.contrib.autograph.pyct import templates
from tensorflow.contrib.autograph.pyct import transformer
from tensorflow.contrib.autograph.pyct.static_analysis import cfg
from tensorflow.contrib.autograph.pyct.static_analysis.annos import NodeAnno
class SymbolNamer(object):
  """Interface that ControlFlowTransformer expects its namer to implement."""

  def new_symbol(self, name_root, reserved_locals):
    """Generates a fresh, unique symbol name.

    Args:
      name_root: String, stem from which the new name is derived.
      reserved_locals: Set(string), extra local symbols that must not be
          reused for the generated name.

    Returns:
      String, the new symbol name.
    """
    raise NotImplementedError()
class ControlFlowTransformer(transformer.Base):
"""Transforms control flow structures like loops an conditionals."""
  def _create_cond_branch(self, body_name, aliased_orig_names,
                          aliased_new_names, body, returns):
    """Builds the AST for one branch function of a conditional.

    Args:
      body_name: String, name of the generated branch function.
      aliased_orig_names: Symbols from the enclosing scope that the branch
          needs; they are rebound to local aliases inside the function.
      aliased_new_names: Local alias names, parallel to aliased_orig_names.
      body: List of AST statements forming the branch body.
      returns: Symbols whose final values the branch function returns.

    Returns:
      AST nodes for the generated function definition.
    """
    if aliased_orig_names:
      # Rebind outer-scope values to local aliases before running the body, so
      # assignments inside the branch stay local to the generated function.
      template = """
        def body_name():
          aliased_new_names, = aliased_orig_names,
          body
          return (returns,)
      """
      return templates.replace(
          template,
          body_name=body_name,
          body=body,
          aliased_orig_names=aliased_orig_names,
          aliased_new_names=aliased_new_names,
          returns=returns)
    else:
      template = """
        def body_name():
          body
          return (returns,)
      """
      return templates.replace(
          template, body_name=body_name, body=body, returns=returns)
  def _create_cond_expr(self, results, test, body_name, orelse_name):
    """Builds the AST that dispatches the conditional via ag__.utils.run_cond.

    Args:
      results: Symbols to which the conditional's results are assigned, or
          None when the branches are executed for side effects only.
      test: AST expression for the conditional's predicate.
      body_name: Name of the generated main-branch function.
      orelse_name: Name of the generated else-branch function.

    Returns:
      AST nodes for the run_cond call (with assignment when results is set).
    """
    if results is not None:
      template = """
        results = ag__.utils.run_cond(test, body_name, orelse_name)
      """
      return templates.replace(
          template,
          test=test,
          results=results,
          body_name=body_name,
          orelse_name=orelse_name)
    else:
      # No live symbols to capture; run the branches for side effects only.
      template = """
        ag__.utils.run_cond(test, body_name, orelse_name)
      """
      return templates.replace(
          template, test=test, body_name=body_name, orelse_name=orelse_name)
def visit_If(self, node):
self.generic_visit(node)
body_scope = anno.getanno(node, NodeAnno.BODY_SCOPE)
orelse_scope = anno.getanno(node, NodeAnno.ORELSE_SCOPE)
body_defs = body_scope.created | body_scope.modified
orelse_defs = orelse_scope.created | orelse_scope.modified
live = anno.getanno(node, 'live_out')
# We'll need to check if we're closing over variables that are defined
# elsewhere in the function
# NOTE: we can only detect syntactic closure in the scope
# of the code passed in. If the AutoGraph'd function itself closes
# over other variables, this analysis won't take that into account.
defined = anno.getanno(node, 'defined_in')
# We only need to return variables that are
# - modified by one or both branches
# - live (or has a live parent) at the end of the conditional
modified = []
for def_ in body_defs | orelse_defs:
def_with_parents = set((def_,)) | def_.support_set
if live & def_with_parents:
modified.append(def_)
# We need to check if live created variables are balanced
# in both branches
created = live & (body_scope.created | orelse_scope.created)
# The if statement is illegal if there are variables that are created,
# that are also live, but both branches don't create them.
if created:
if created != (body_scope.created & live):
raise ValueError(
'The main branch does not create all live symbols that the else '
'branch does.')
if created != (orelse_scope.created & live):
raise ValueError(
'The else branch does not create all live symbols that the main '
'branch does.')
# Alias the closure variables inside the conditional functions
# to avoid errors caused by the local variables created in the branch
# functions.
# We will alias variables independently for body and orelse scope,
# because different branches might write different variables.
aliased_body_orig_names = tuple(body_scope.modified - body_scope.created)
aliased_orelse_orig_names = tuple(orelse_scope.modified -
orelse_scope.created)
aliased_body_new_names = tuple(
self.context.namer.new_symbol(s.ssf(), body_scope.referenced)
for s in aliased_body_orig_names)
aliased_orelse_new_names = tuple(
self.context.namer.new_symbol(s.ssf(), orelse_scope.referenced)
for s in aliased_orelse_orig_names)
alias_body_map = dict(zip(aliased_body_orig_names, aliased_body_new_names))
alias_orelse_map = dict(
zip(aliased_orelse_orig_names, aliased_orelse_new_names))
node_body = ast_util.rename_symbols(node.body, alias_body_map)
node_orelse = ast_util.rename_symbols(node.orelse, alias_orelse_map)
if not modified:
# When the cond would return no value, we leave the cond called without
# results. That in turn should trigger the side effect guards. The
# branch functions will return a dummy value that ensures cond
# actually has some return value as well.
results = None
elif len(modified) == 1:
results = modified[0]
else:
results = gast.Tuple([s.ast() for s in modified], None)
body_name = self.context.namer.new_symbol('if_true', body_scope.referenced)
orelse_name = self.context.namer.new_symbol('if_false',
orelse_scope.referenced)
if modified:
def build_returns(aliased_names, alias_map, scope):
"""Builds list of return variables for a branch of a conditional."""
returns = []
for s in modified:
if s in aliased_names:
returns.append(alias_map[s])
else:
if s not in scope.created | defined:
raise ValueError(
'Attempting to return variable "%s" from the true branch of '
'a conditional, but it was not closed over, or created in '
'this branch.' % str(s))
else:
returns.append(s)
return tuple(returns)
body_returns = build_returns(aliased_body_orig_names, alias_body_map,
body_scope)
orelse_returns = build_returns(aliased_orelse_orig_names,
alias_orelse_map, orelse_scope)
else:
body_returns = orelse_returns = templates.replace('tf.ones(())')[0].value
body_def = self._create_cond_branch(
body_name,
aliased_orig_names=tuple(aliased_body_orig_names),
aliased_new_names=tuple(aliased_body_new_names),
body=node_body,
returns=body_returns)
orelse_def = self._create_cond_branch(
orelse_name,
aliased_orig_names=tuple(aliased_orelse_orig_names),
aliased_new_names=tuple(aliased_orelse_new_names),
body=node_orelse,
returns=orelse_returns)
cond_expr = self._create_cond_expr(results, node.test, body_name,
orelse_name)
return body_def + orelse_def + cond_expr
def visit_While(self, node):
self.generic_visit(node)
body_scope = anno.getanno(node, NodeAnno.BODY_SCOPE)
body_closure = body_scope.modified - body_scope.created
all_referenced = body_scope.referenced
cond_scope = anno.getanno(node, NodeAnno.COND_SCOPE)
cond_closure = set()
for s in cond_scope.referenced:
for root in s.support_set:
if root not in body_scope.created:
cond_closure.add(root)
state = list(body_closure)
if not state:
# TODO(mdan): Implement this properly.
# To complete this statement, we need to check whether any variable
# created inside the body scope is used before being modified outside the
# scope. This should be done during activity analysis, and in general
# should cover the case where variables may not be initialized.
raise ValueError('cannot convert while loop: no outputs')
state_ssf = [
self.context.namer.new_symbol(s.ssf(), all_referenced) for s in state
]
ssf_map = {
name: ssf
for name, ssf in zip(state, state_ssf)
if str(name) != ssf
}
if len(state) == 1:
state = state[0]
state_ssf = state_ssf[0]
state_ast_tuple = state
else:
state_ast_tuple = gast.Tuple([n.ast() for n in state], None)
node_body = ast_util.rename_symbols(node.body, ssf_map)
test = ast_util.rename_symbols(node.test, ssf_map)
template = """
def test_name(state_ssf):
return test
def body_name(state_ssf):
body
return state_ssf,
state_ast_tuple = ag__.while_stmt(
test_name, body_name, (state,), (extra_deps,))
"""
node = templates.replace(
template,
state=state,
state_ssf=state_ssf,
state_ast_tuple=state_ast_tuple,
test_name=self.context.namer.new_symbol('loop_test',
body_scope.referenced),
test=test,
body_name=self.context.namer.new_symbol('loop_body',
body_scope.referenced),
body=node_body,
extra_deps=tuple(s.ast() for s in cond_closure),
)
return node
def visit_For(self, node):
self.generic_visit(node)
body_scope = anno.getanno(node, NodeAnno.BODY_SCOPE)
body_closure = body_scope.modified - body_scope.created
all_referenced = body_scope.referenced
state = list(body_closure)
state_ssf = [
self.context.namer.new_symbol(s.ssf(), all_referenced) for s in state
]
ssf_map = {
name: ssf
for name, ssf in zip(state, state_ssf)
if str(name) != ssf
}
if len(state) == 1:
state = state[0]
state_ssf = state_ssf[0]
state_ast_tuple = state
else:
state_ast_tuple = gast.Tuple([n.ast() for n in state], None)
node_body = ast_util.rename_symbols(node.body, ssf_map)
if anno.hasanno(node, 'extra_test'):
extra_test = anno.getanno(node, 'extra_test')
extra_test = ast_util.rename_symbols(extra_test, ssf_map)
else:
extra_test = parser.parse_expression('True')
template = """
def extra_test_name(state_ssf):
return extra_test_expr
def body_name(iterate, state_ssf):
body
return state_ssf,
state_ast_tuple = ag__.for_stmt(
iter_, extra_test_name, body_name, (state,))
"""
node = templates.replace(
template,
state=state,
state_ssf=state_ssf,
state_ast_tuple=state_ast_tuple,
iter_=node.iter,
iterate=node.target,
extra_test_name=self.context.namer.new_symbol('extra_test',
all_referenced),
extra_test_expr=extra_test,
body_name=self.context.namer.new_symbol('loop_body', all_referenced),
body=node_body)
return node
def transform(node, context):
  """Converts `while`, `for` and `if` statements to their functional form.

  Args:
    node: AST node to transform.
    context: Transformation context carrying the namer and analysis state.

  Returns:
    The transformed AST node.
  """
  # Liveness and definedness analyses annotate the tree with the information
  # visit_If needs to decide which symbols each branch must return.
  cfg.run_analyses(node, cfg.Liveness(context))
  cfg.run_analyses(node, cfg.Defined(context))
  node = ControlFlowTransformer(context).visit(node)
  return node
# (c) 2018, Western Telematic Inc. <kenp@wti.com>
# (c) 2012-18 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
# Ansible plugin metadata: schema version, maturity status and support owner.
ANSIBLE_METADATA = {
    'metadata_version': '1.1',
    'status': ['preview'],
    'supported_by': 'community'
}
DOCUMENTATION = """
lookup: cpm_metering
author: "Western Telematic Inc. (@wtinetworkgear)"
version_added: "2.7"
short_description: Get Power and Current data from WTI OOB/Combo and PDU devices
description:
- "Get Power and Current data from WTI OOB/Combo and PDU devices"
options:
_terms:
description:
- This is the Action to send the module.
required: true
choices: [ "getpower", "getcurrent" ]
cpm_url:
description:
- This is the URL of the WTI device to send the module.
required: true
cpm_username:
description:
- This is the Username of the WTI device to send the module.
cpm_password:
description:
- This is the Password of the WTI device to send the module.
use_https:
description:
- Designates to use an https connection or http connection.
required: false
default: True
choices: [ True, False ]
validate_certs:
description:
- If false, SSL certificates will not be validated. This should only be used
on personally controlled sites using self-signed certificates.
required: false
type: bool
default: true
use_proxy:
description: Flag to control if the lookup will observe HTTP proxy environment variables when present.
type: boolean
default: True
startdate:
description:
- Start date of the range to look for power data
required: false
enddate:
description:
- End date of the range to look for power data
required: false
"""
# EXAMPLES is rendered by ansible-doc. The date-range example was missing the
# comma between the startdate and enddate arguments; fixed here.
EXAMPLES = """
# Get Power data
- name: Get Power data for a given WTI device
- debug:
    var: lookup('cpm_metering',
                'getpower',
                validate_certs=true,
                use_https=true,
                cpm_url='rest.wti.com',
                cpm_username='restpower',
                cpm_password='restfulpowerpass12')

# Get Current data
- name: Get Current data for a given WTI device
- debug:
    var: lookup('cpm_metering',
                'getcurrent',
                validate_certs=true,
                use_https=true,
                cpm_url='rest.wti.com',
                cpm_username='restpower',
                cpm_password='restfulpowerpass12')

# Get Power data for a date range
- name: Get Power data for a given WTI device given a certain date range
- debug:
    var: lookup('cpm_metering',
                'getpower',
                validate_certs=true,
                use_https=true,
                cpm_url='rest.wti.com',
                cpm_username='restpower',
                cpm_password='restfulpowerpass12',
                startdate='08-12-2018',
                enddate='08-14-2018')
"""
RETURN = """
_list:
description: The output JSON returned from the commands sent
returned: always
type: str
"""
import base64
from ansible.errors import AnsibleError
from ansible.plugins.lookup import LookupBase
from ansible.module_utils._text import to_text, to_bytes, to_native
from ansible.module_utils.six.moves.urllib.error import HTTPError, URLError
from ansible.module_utils.urls import open_url, ConnectionError, SSLValidationError
try:
    # Older Ansible exposes the controller's shared Display via __main__.
    from __main__ import display
except ImportError:
    # Fall back to a private Display instance (e.g. when run outside the CLI).
    from ansible.utils.display import Display
    display = Display()
class LookupModule(LookupBase):
    """Lookup plugin that queries WTI OOB/Combo and PDU devices for metering data."""

    def run(self, terms, variables=None, **kwargs):
        """Fetch power/current data for each requested action term.

        Args:
            terms: list of actions; each must be 'getpower' or 'getcurrent'.
            variables: unused; part of the lookup plugin API.
            **kwargs: plugin options (cpm_url, cpm_username, cpm_password, ...).

        Returns:
            List with one JSON response body (text) per term.

        Raises:
            AnsibleError: on an unknown term or any connection/HTTP/SSL error.
        """
        self.set_options(direct=kwargs)

        ret = []
        for term in terms:
            # HTTP basic-auth header value built from the configured credentials.
            auth = to_text(base64.b64encode(to_bytes('{0}:{1}'.format(self.get_option('cpm_username'), self.get_option('cpm_password')),
                           errors='surrogate_or_strict')))

            # Optional date-range filter; only applied when both bounds are set.
            additional = ""
            startdate = self.get_option("startdate")
            enddate = self.get_option("enddate")
            if startdate and enddate:
                additional = "?startdate=" + startdate + "&enddate=" + enddate

            protocol = "https://" if self.get_option('use_https') else "http://"

            if term == 'getpower':
                fullurl = ("%s%s/api/v2/config/power" % (protocol, self.get_option('cpm_url')))
            elif term == 'getcurrent':
                fullurl = ("%s%s/api/v2/config/current" % (protocol, self.get_option('cpm_url')))
            else:
                raise AnsibleError("Power command not recognized %s " % (term))

            fullurl += additional

            display.vvvv("cpm_metering connecting to %s" % fullurl)
            try:
                response = open_url(fullurl, validate_certs=self.get_option('validate_certs'), use_proxy=self.get_option('use_proxy'),
                                    headers={'Content-Type': 'application/json', 'Authorization': "Basic %s" % auth})
            except HTTPError as e:
                raise AnsibleError("Received HTTP error for %s : %s" % (fullurl, to_native(e)))
            except URLError as e:
                raise AnsibleError("Failed lookup url for %s : %s" % (fullurl, to_native(e)))
            except SSLValidationError as e:
                raise AnsibleError("Error validating the server's certificate for %s: %s" % (fullurl, to_native(e)))
            except ConnectionError as e:
                raise AnsibleError("Error connecting to %s: %s" % (fullurl, to_native(e)))

            ret.append(to_text(response.read()))

        return ret
import geo from '@next/font/google/target.css?{"path":"pages/test.tsx","import":"Geo","arguments":["test",[1.0],{"a":2.0},3.0],"variableName":"geo"}'; | javascript | github | https://github.com/vercel/next.js | crates/next-custom-transforms/tests/fixture/next-font-loaders/many-args/output.js |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
from msrest.exceptions import HttpOperationError
class Error(Model):
    """Generic error payload returned by the service.

    :param status: Numeric status code reported by the server.
    :type status: int
    :param message: Human-readable description of the failure.
    :type message: str
    """

    _attribute_map = {
        'status': {'key': 'status', 'type': 'int'},
        'message': {'key': 'message', 'type': 'str'},
    }

    def __init__(self, status=None, message=None):
        # Plain attribute assignment; serialization is driven by _attribute_map.
        self.message = message
        self.status = status
class ErrorException(HttpOperationError):
    """Raised when the server responds with an 'Error' payload.

    :param deserialize: A deserializer used to decode the response body.
    :param response: Server response to be deserialized.
    """

    def __init__(self, deserialize, response, *args):
        # 'Error' names the model class used to deserialize the body.
        super(ErrorException, self).__init__(deserialize, response, 'Error', *args)
import oe.path
class NotFoundError(bb.BBHandledException):
    """Raised when a path expected on disk is missing."""

    def __init__(self, path):
        self.path = path

    def __str__(self):
        return "Error: %s not found." % self.path
class CmdError(bb.BBHandledException):
    """Raised when an external command exits with a non-zero status."""

    def __init__(self, exitstatus, output):
        # status: decoded exit code; output: combined stdout/stderr text.
        self.status = exitstatus
        self.output = output

    def __str__(self):
        return "Command Error: exit status: %d  Output:\n%s" % (self.status, self.output)
def runcmd(args, dir = None):
    """Execute an external command and return its output text.

    Each element of *args* is shell-quoted, the pieces joined into one command
    line and run via commands.getstatusoutput (Python 2).

    Args:
        args: sequence of command arguments.
        dir: optional directory to run from; the previous working directory
            is restored afterwards.

    Raises:
        NotFoundError: if *dir* does not exist.
        CmdError: if the command exits non-zero.
    """
    import commands

    if dir:
        prev_cwd = os.path.abspath(os.curdir)
        if not os.path.exists(dir):
            raise NotFoundError(dir)
        os.chdir(dir)

    try:
        quoted = [commands.mkarg(str(arg)) for arg in args]
        cmdline = " ".join(quoted)
        (exitstatus, output) = commands.getstatusoutput(cmdline)
        if exitstatus != 0:
            # getstatusoutput reports the raw wait status; the high byte
            # carries the actual exit code.
            raise CmdError(exitstatus >> 8, output)
        return output
    finally:
        if dir:
            os.chdir(prev_cwd)
class PatchError(Exception):
    """Generic failure while importing, applying or refreshing a patch."""

    def __init__(self, msg):
        self.msg = msg

    def __str__(self):
        return "Patch Error: %s" % self.msg
class PatchSet(object):
    """Abstract base for an ordered collection of patches applied to a tree."""

    # Default values filled into every imported patch's metadata dict.
    defaults = {
        "strippath": 1
    }

    def __init__(self, dir, d):
        # dir: root of the source tree the patches apply to.
        # d: bitbake datastore, used for variable expansion and fetching.
        self.dir = dir
        self.d = d
        self.patches = []
        # Index of the most recently applied patch, or None if none applied.
        self._current = None

    def current(self):
        """Return the index of the most recently applied patch (or None)."""
        return self._current

    def Clean(self):
        """
        Clean out the patch set. Generally includes unapplying all
        patches and wiping out all associated metadata.
        """
        raise NotImplementedError()

    def Import(self, patch, force):
        """Validate *patch* metadata and fill in defaults.

        Subclasses extend this to actually record the patch in their series.
        """
        if not patch.get("file"):
            if not patch.get("remote"):
                raise PatchError("Patch file must be specified in patch import.")
            else:
                # Fetch the remote patch so a local path is available.
                patch["file"] = bb.fetch2.localpath(patch["remote"], self.d)

        for param in PatchSet.defaults:
            if not patch.get(param):
                patch[param] = PatchSet.defaults[param]

        if patch.get("remote"):
            # NOTE(review): recomputes the local path with variable expansion,
            # overwriting any caller-supplied "file" whenever "remote" is set.
            patch["file"] = bb.data.expand(bb.fetch2.localpath(patch["remote"], self.d), self.d)

        patch["filemd5"] = bb.utils.md5_file(patch["file"])

    def Push(self, force):
        """Apply the next patch (or all); implemented by subclasses."""
        raise NotImplementedError()

    def Pop(self, force):
        """Unapply the topmost patch (or all); implemented by subclasses."""
        raise NotImplementedError()

    def Refresh(self, remote = None, all = None):
        """Regenerate patch files from the tree state; implemented by subclasses."""
        raise NotImplementedError()
class PatchTree(PatchSet):
    """PatchSet backed by a plain patches/ directory and a quilt-style series file."""

    def __init__(self, dir, d):
        PatchSet.__init__(self, dir, d)
        self.patchdir = os.path.join(self.dir, 'patches')
        self.seriespath = os.path.join(self.dir, 'patches', 'series')
        bb.utils.mkdirhier(self.patchdir)

    def _appendPatchFile(self, patch, strippath):
        # Record the patch in the series file and copy it into patchdir.
        # NOTE(review): assumes strippath is already a string - verify callers.
        with open(self.seriespath, 'a') as f:
            f.write(os.path.basename(patch) + "," + strippath + "\n")
        shellcmd = ["cat", patch, ">" , self.patchdir + "/" + os.path.basename(patch)]
        runcmd(["sh", "-c", " ".join(shellcmd)], self.dir)

    def _removePatch(self, p):
        # Reverse-apply a single "file,strippath" series entry.
        patch = {}
        patch['file'] = p.split(",")[0]
        patch['strippath'] = p.split(",")[1]
        self._applypatch(patch, False, True)

    def _removePatchFile(self, all = False):
        """Unapply the last patch (or every patch) and rewrite the series file."""
        if not os.path.exists(self.seriespath):
            return
        patches = open(self.seriespath, 'r+').readlines()
        if all:
            for p in reversed(patches):
                self._removePatch(os.path.join(self.patchdir, p.strip()))
            patches = []
        else:
            self._removePatch(os.path.join(self.patchdir, patches[-1].strip()))
            patches.pop()
        with open(self.seriespath, 'w') as f:
            for p in patches:
                f.write(p)

    def Import(self, patch, force = None):
        """Queue *patch* for application right after the current position."""
        PatchSet.Import(self, patch, force)
        if self._current is not None:
            i = self._current + 1
        else:
            i = 0
        self.patches.insert(i, patch)

    def _applypatch(self, patch, force = False, reverse = False, run = True):
        """Apply (or reverse) *patch* with patch(1).

        When run is False, the command is returned as an argument list
        (matching QuiltTree._runcmd) instead of being executed.
        """
        shellcmd = ["cat", patch['file'], "|", "patch", "-p", patch['strippath']]
        if reverse:
            shellcmd.append('-R')

        if not run:
            # BUGFIX: this previously returned "sh" + "-c" + " ".join(shellcmd),
            # i.e. an unusable "sh-ccat ..." string; return a list so callers
            # (e.g. UserResolver) can " ".join() it into a valid command line.
            return ["sh", "-c", " ".join(shellcmd)]

        if not force:
            # First pass verifies the patch applies cleanly.
            shellcmd.append('--dry-run')

        output = runcmd(["sh", "-c", " ".join(shellcmd)], self.dir)

        if force:
            return

        # Drop --dry-run and apply for real.
        shellcmd.pop(len(shellcmd) - 1)
        output = runcmd(["sh", "-c", " ".join(shellcmd)], self.dir)

        if not reverse:
            self._appendPatchFile(patch['file'], patch['strippath'])

        return output

    def Push(self, force = False, all = False, run = True):
        bb.note("self._current is %s" % self._current)
        bb.note("patches is %s" % self.patches)
        if all:
            for i in self.patches:
                bb.note("applying patch %s" % i)
                self._applypatch(i, force)
                # NOTE(review): stores the patch dict, not its index; Pop's
                # integer arithmetic would fail after Push(all=True) - verify.
                self._current = i
        else:
            if self._current is not None:
                next = self._current + 1
            else:
                next = 0

            bb.note("applying patch %s" % self.patches[next])
            ret = self._applypatch(self.patches[next], force)

            self._current = next
            return ret

    def Pop(self, force = None, all = None):
        if all:
            self._removePatchFile(True)
            self._current = None
        else:
            self._removePatchFile(False)

        if self._current == 0:
            self._current = None

        if self._current is not None:
            self._current = self._current - 1

    def Clean(self):
        """Unapply everything tracked by the series file."""
        self.Pop(all=True)
class GitApplyTree(PatchTree):
    """PatchTree variant that applies patches with 'git apply'."""

    def __init__(self, dir, d):
        PatchTree.__init__(self, dir, d)

    def _applypatch(self, patch, force = False, reverse = False, run = True):
        """Apply (or reverse) *patch* using git-apply.

        When run is False, the command is returned as an argument list
        (matching QuiltTree._runcmd) instead of being executed.
        """
        shellcmd = ["git", "--git-dir=.", "apply", "-p%s" % patch['strippath']]

        if reverse:
            shellcmd.append('-R')

        shellcmd.append(patch['file'])

        if not run:
            # BUGFIX: this previously returned "sh" + "-c" + " ".join(shellcmd),
            # i.e. an unusable "sh-cgit ..." string; return a list so callers
            # can " ".join() it into a valid command line.
            return ["sh", "-c", " ".join(shellcmd)]

        return runcmd(["sh", "-c", " ".join(shellcmd)], self.dir)
class QuiltTree(PatchSet):
    """PatchSet managed by quilt(1), driving it through its command line."""

    def _runcmd(self, args, run = True):
        # Every quilt invocation uses the distro-provided quiltrc so behaviour
        # does not depend on the user's own quilt configuration.
        quiltrc = self.d.getVar('QUILTRCFILE', True)
        if not run:
            return ["quilt"] + ["--quiltrc"] + [quiltrc] + args
        runcmd(["quilt"] + ["--quiltrc"] + [quiltrc] + args, self.dir)

    def _quiltpatchpath(self, file):
        # Location of *file* inside the tree's patches/ directory.
        return os.path.join(self.dir, "patches", os.path.basename(file))


    def __init__(self, dir, d):
        PatchSet.__init__(self, dir, d)
        self.initialized = False
        p = os.path.join(self.dir, 'patches')
        if not os.path.exists(p):
            os.makedirs(p)

    def Clean(self):
        """Force-pop all applied patches and drop the series file."""
        try:
            self._runcmd(["pop", "-a", "-f"])
            oe.path.remove(os.path.join(self.dir, "patches","series"))
        except Exception:
            # Best effort: nothing applied / no series file is not an error.
            pass
        self.initialized = True

    def InitFromDir(self):
        """Populate self.patches/_current from an existing quilt layout on disk."""
        # read series -> self.patches
        seriespath = os.path.join(self.dir, 'patches', 'series')
        if not os.path.exists(self.dir):
            raise NotFoundError(self.dir)
        if os.path.exists(seriespath):
            series = file(seriespath, 'r')  # Python 2 builtin file()
            for line in series.readlines():
                patch = {}
                parts = line.strip().split()
                patch["quiltfile"] = self._quiltpatchpath(parts[0])
                patch["quiltfilemd5"] = bb.utils.md5_file(patch["quiltfile"])
                if len(parts) > 1:
                    # Drop the "-p" prefix from the strip-level argument.
                    patch["strippath"] = parts[1][2:]
                self.patches.append(patch)
            series.close()

            # determine which patches are applied -> self._current
            try:
                output = runcmd(["quilt", "applied"], self.dir)
            except CmdError:
                import sys
                # Python 2 idiom: inspect the in-flight exception's output.
                if sys.exc_value.output.strip() == "No patches applied":
                    return
                else:
                    raise
            output = [val for val in output.split('\n') if not val.startswith('#')]
            for patch in self.patches:
                if os.path.basename(patch["quiltfile"]) == output[-1]:
                    self._current = self.patches.index(patch)
        self.initialized = True

    def Import(self, patch, force = None):
        """Link *patch* into patches/ and append it to the quilt series."""
        if not self.initialized:
            self.InitFromDir()
        PatchSet.Import(self, patch, force)

        oe.path.symlink(patch["file"], self._quiltpatchpath(patch["file"]), force=True)
        f = open(os.path.join(self.dir, "patches","series"), "a");
        f.write(os.path.basename(patch["file"]) + " -p" + patch["strippath"]+"\n")
        f.close()
        patch["quiltfile"] = self._quiltpatchpath(patch["file"])
        patch["quiltfilemd5"] = bb.utils.md5_file(patch["quiltfile"])

        # TODO: determine if the file being imported:
        # 1) is already imported, and is the same
        # 2) is already imported, but differs

        self.patches.insert(self._current or 0, patch)


    def Push(self, force = False, all = False, run = True):
        # quilt push [-f]

        args = ["push"]
        if force:
            args.append("-f")
        if all:
            args.append("-a")
        if not run:
            # Return the command as a list instead of executing it.
            return self._runcmd(args, run)

        self._runcmd(args)

        if self._current is not None:
            self._current = self._current + 1
        else:
            self._current = 0

    def Pop(self, force = None, all = None):
        # quilt pop [-f]
        args = ["pop"]
        if force:
            args.append("-f")
        if all:
            args.append("-a")

        self._runcmd(args)

        if self._current == 0:
            self._current = None

        if self._current is not None:
            self._current = self._current - 1

    def Refresh(self, **kwargs):
        """Refresh a patch, either remotely (copy back to origin) or via quilt."""
        if kwargs.get("remote"):
            patch = self.patches[kwargs["patch"]]
            if not patch:
                raise PatchError("No patch found at index %s in patchset." % kwargs["patch"])
            (type, host, path, user, pswd, parm) = bb.decodeurl(patch["remote"])
            if type == "file":
                import shutil
                if not patch.get("file") and patch.get("remote"):
                    patch["file"] = bb.fetch2.localpath(patch["remote"], self.d)

                # Copy the refreshed quilt patch back over the original file.
                shutil.copyfile(patch["quiltfile"], patch["file"])
            else:
                raise PatchError("Unable to do a remote refresh of %s, unsupported remote url scheme %s." % (os.path.basename(patch["quiltfile"]), type))
        else:
            # quilt refresh
            args = ["refresh"]
            if kwargs.get("quiltfile"):
                args.append(os.path.basename(kwargs["quiltfile"]))
            elif kwargs.get("patch"):
                args.append(os.path.basename(self.patches[kwargs["patch"]]["quiltfile"]))
            self._runcmd(args)
class Resolver(object):
    """Interface for strategies that resolve failed patch applications."""

    def __init__(self, patchset, terminal):
        raise NotImplementedError()

    def Resolve(self):
        raise NotImplementedError()

    def Revert(self):
        raise NotImplementedError()

    def Finalize(self):
        raise NotImplementedError()
class NOOPResolver(Resolver):
    """Resolver that simply pushes the patch set and never intervenes."""

    def __init__(self, patchset, terminal):
        self.patchset = patchset
        self.terminal = terminal

    def Resolve(self):
        """Apply the patch set from its directory, restoring the caller's cwd."""
        olddir = os.path.abspath(os.curdir)
        os.chdir(self.patchset.dir)
        try:
            self.patchset.Push()
        finally:
            # BUGFIX: the working directory was previously restored only on
            # failure; restore it unconditionally, as UserResolver does.
            # (The dead "import sys" in the old except clause is also gone.)
            os.chdir(olddir)
# Patch resolver which relies on the user doing all the work involved in the
# resolution, with the exception of refreshing the remote copy of the patch
# files (the urls).
class UserResolver(Resolver):
def __init__(self, patchset, terminal):
self.patchset = patchset
self.terminal = terminal
# Force a push in the patchset, then drop to a shell for the user to
# resolve any rejected hunks
def Resolve(self):
olddir = os.path.abspath(os.curdir)
os.chdir(self.patchset.dir)
try:
self.patchset.Push(False)
except CmdError, v:
# Patch application failed
patchcmd = self.patchset.Push(True, False, False)
t = self.patchset.d.getVar('T', True)
if not t:
bb.msg.fatal("Build", "T not set")
bb.utils.mkdirhier(t)
import random
rcfile = "%s/bashrc.%s.%s" % (t, str(os.getpid()), random.random())
f = open(rcfile, "w")
f.write("echo '*** Manual patch resolution mode ***'\n")
f.write("echo 'Dropping to a shell, so patch rejects can be fixed manually.'\n")
f.write("echo 'Run \"quilt refresh\" when patch is corrected, press CTRL+D to exit.'\n")
f.write("echo ''\n")
f.write(" ".join(patchcmd) + "\n")
f.close()
os.chmod(rcfile, 0775)
self.terminal("bash --rcfile " + rcfile, 'Patch Rejects: Please fix patch rejects manually', self.patchset.d)
# Construct a new PatchSet after the user's changes, compare the
# sets, checking patches for modifications, and doing a remote
# refresh on each.
oldpatchset = self.patchset
self.patchset = oldpatchset.__class__(self.patchset.dir, self.patchset.d)
for patch in self.patchset.patches:
oldpatch = None
for opatch in oldpatchset.patches:
if opatch["quiltfile"] == patch["quiltfile"]:
oldpatch = opatch
if oldpatch:
patch["remote"] = oldpatch["remote"]
if patch["quiltfile"] == oldpatch["quiltfile"]:
if patch["quiltfilemd5"] != oldpatch["quiltfilemd5"]:
bb.note("Patch %s has changed, updating remote url %s" % (os.path.basename(patch["quiltfile"]), patch["remote"]))
# user change? remote refresh
self.patchset.Refresh(remote=True, patch=self.patchset.patches.index(patch))
else:
# User did not fix the problem. Abort.
raise PatchError("Patch application failed, and user did not fix and refresh the patch.")
except Exception:
os.chdir(olddir)
raise
os.chdir(olddir) | unknown | codeparrot/codeparrot-clean | ||
# Copyright 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest_lib.common.utils import data_utils
from tempest_lib import exceptions as lib_exc
from tempest.api.network import base
from tempest import config
from tempest import test
CONF = config.CONF
class VPNaaSTestJSON(base.BaseAdminNetworkTest):
"""
Tests the following operations in the Neutron API using the REST client for
Neutron:
List, Show, Create, Delete, and Update VPN Service
List, Show, Create, Delete, and Update IKE policy
List, Show, Create, Delete, and Update IPSec policy
"""
@classmethod
def skip_checks(cls):
super(VPNaaSTestJSON, cls).skip_checks()
if not test.is_extension_enabled('vpnaas', 'network'):
msg = "vpnaas extension not enabled."
raise cls.skipException(msg)
    @classmethod
    def resource_setup(cls):
        """Create the shared network/subnet/router plus baseline VPN resources."""
        super(VPNaaSTestJSON, cls).resource_setup()
        cls.ext_net_id = CONF.network.public_network_id
        # Network topology the VPN service attaches to; order matters: the
        # router interface must exist before the vpnservice is created.
        cls.network = cls.create_network()
        cls.subnet = cls.create_subnet(cls.network)
        cls.router = cls.create_router(
            data_utils.rand_name("router"),
            external_network_id=CONF.network.public_network_id)
        cls.create_router_interface(cls.router['id'], cls.subnet['id'])
        cls.vpnservice = cls.create_vpnservice(cls.subnet['id'],
                                               cls.router['id'])
        cls.ikepolicy = cls.create_ikepolicy(
            data_utils.rand_name("ike-policy-"))
        cls.ipsecpolicy = cls.create_ipsecpolicy(
            data_utils.rand_name("ipsec-policy-"))
def _delete_ike_policy(self, ike_policy_id):
# Deletes a ike policy and verifies if it is deleted or not
ike_list = list()
all_ike = self.client.list_ikepolicies()
for ike in all_ike['ikepolicies']:
ike_list.append(ike['id'])
if ike_policy_id in ike_list:
self.client.delete_ikepolicy(ike_policy_id)
# Asserting that the policy is not found in list after deletion
ikepolicies = self.client.list_ikepolicies()
ike_id_list = list()
for i in ikepolicies['ikepolicies']:
ike_id_list.append(i['id'])
self.assertNotIn(ike_policy_id, ike_id_list)
def _delete_ipsec_policy(self, ipsec_policy_id):
# Deletes an ike policy if it exists
try:
self.client.delete_ipsecpolicy(ipsec_policy_id)
except lib_exc.NotFound:
pass
    def _assertExpected(self, expected, actual):
        """Assert every key/value pair in *expected* is present in *actual*."""
        # Check if not expected keys/values exists in actual response body
        for key, value in expected.iteritems():
            self.assertIn(key, actual)
            self.assertEqual(value, actual[key])
def _delete_vpn_service(self, vpn_service_id):
self.client.delete_vpnservice(vpn_service_id)
# Asserting if vpn service is found in the list after deletion
body = self.client.list_vpnservices()
vpn_services = [vs['id'] for vs in body['vpnservices']]
self.assertNotIn(vpn_service_id, vpn_services)
    def _get_tenant_id(self):
        """
        Returns the tenant_id of the client current user
        """
        # TODO(jroovers) This is a temporary workaround to get the tenant_id
        # of the current client. Replace this once tenant_isolation for
        # neutron is fixed.
        body = self.client.show_network(self.network['id'])
        return body['network']['tenant_id']
    @test.attr(type='smoke')
    @test.idempotent_id('14311574-0737-4e53-ac05-f7ae27742eed')
    def test_admin_create_ipsec_policy_for_tenant(self):
        """Admin can create an IPSec policy on behalf of another tenant."""
        tenant_id = self._get_tenant_id()
        # Create IPSec policy for the newly created tenant
        name = data_utils.rand_name('ipsec-policy')
        body = (self.admin_client.
                create_ipsecpolicy(name=name, tenant_id=tenant_id))
        ipsecpolicy = body['ipsecpolicy']
        self.assertIsNotNone(ipsecpolicy['id'])
        self.addCleanup(self.admin_client.delete_ipsecpolicy,
                        ipsecpolicy['id'])

        # Assert that created ipsec policy is found in API list call
        body = self.client.list_ipsecpolicies()
        ipsecpolicies = [policy['id'] for policy in body['ipsecpolicies']]
        self.assertIn(ipsecpolicy['id'], ipsecpolicies)
    @test.attr(type='smoke')
    @test.idempotent_id('b62acdc6-0c53-4d84-84aa-859b22b79799')
    def test_admin_create_vpn_service_for_tenant(self):
        """Admin can create a VPN service for another tenant's router/subnet."""
        tenant_id = self._get_tenant_id()

        # Create vpn service for the newly created tenant
        network2 = self.create_network()
        subnet2 = self.create_subnet(network2)
        router2 = self.create_router(data_utils.rand_name('router-'),
                                     external_network_id=self.ext_net_id)
        self.create_router_interface(router2['id'], subnet2['id'])
        name = data_utils.rand_name('vpn-service')
        body = self.admin_client.create_vpnservice(
            subnet_id=subnet2['id'],
            router_id=router2['id'],
            name=name,
            admin_state_up=True,
            tenant_id=tenant_id)
        vpnservice = body['vpnservice']
        self.assertIsNotNone(vpnservice['id'])
        self.addCleanup(self.admin_client.delete_vpnservice, vpnservice['id'])
        # Assert that created vpnservice is found in API list call
        body = self.client.list_vpnservices()
        vpn_services = [vs['id'] for vs in body['vpnservices']]
        self.assertIn(vpnservice['id'], vpn_services)
    @test.attr(type='smoke')
    @test.idempotent_id('58cc4a1c-443b-4f39-8fb6-c19d39f343ab')
    def test_admin_create_ike_policy_for_tenant(self):
        """Admin can create an IKE policy on behalf of another tenant."""
        tenant_id = self._get_tenant_id()

        # Create IKE policy for the newly created tenant
        name = data_utils.rand_name('ike-policy')
        body = (self.admin_client.
                create_ikepolicy(name=name, ike_version="v1",
                                 encryption_algorithm="aes-128",
                                 auth_algorithm="sha1",
                                 tenant_id=tenant_id))
        ikepolicy = body['ikepolicy']
        self.assertIsNotNone(ikepolicy['id'])
        self.addCleanup(self.admin_client.delete_ikepolicy, ikepolicy['id'])

        # Assert that created ike policy is found in API list call
        body = self.client.list_ikepolicies()
        ikepolicies = [ikp['id'] for ikp in body['ikepolicies']]
        self.assertIn(ikepolicy['id'], ikepolicies)
@test.attr(type='smoke')
@test.idempotent_id('de5bb04c-3a1f-46b1-b329-7a8abba5c7f1')
def test_list_vpn_services(self):
# Verify the VPN service exists in the list of all VPN services
body = self.client.list_vpnservices()
vpnservices = body['vpnservices']
self.assertIn(self.vpnservice['id'], [v['id'] for v in vpnservices])
    @test.attr(type='smoke')
    @test.idempotent_id('aacb13b1-fdc7-41fd-bab2-32621aee1878')
    def test_create_update_delete_vpn_service(self):
        """Create a VPN service on a fresh router and verify it is listed."""
        # Creates a VPN service and sets up deletion
        network1 = self.create_network()
        subnet1 = self.create_subnet(network1)
        router1 = self.create_router(data_utils.rand_name('router-'),
                                     external_network_id=self.ext_net_id)
        self.create_router_interface(router1['id'], subnet1['id'])
        name = data_utils.rand_name('vpn-service1')
        body = self.client.create_vpnservice(subnet_id=subnet1['id'],
                                             router_id=router1['id'],
                                             name=name,
                                             admin_state_up=True)
        vpnservice = body['vpnservice']
        self.addCleanup(self._delete_vpn_service, vpnservice['id'])
        # Assert if created vpnservices are not found in vpnservices list
        body = self.client.list_vpnservices()
        vpn_services = [vs['id'] for vs in body['vpnservices']]
        self.assertIsNotNone(vpnservice['id'])
        self.assertIn(vpnservice['id'], vpn_services)

        # TODO(raies): implement logic to update vpnservice
        # VPNaaS client function to update is implemented.
        # But precondition is that current state of vpnservice
        # should be "ACTIVE" not "PENDING*"
@test.attr(type='smoke')
@test.idempotent_id('0dedfc1d-f8ee-4e2a-bfd4-7997b9dc17ff')
def test_show_vpn_service(self):
# Verifies the details of a vpn service
body = self.client.show_vpnservice(self.vpnservice['id'])
vpnservice = body['vpnservice']
self.assertEqual(self.vpnservice['id'], vpnservice['id'])
self.assertEqual(self.vpnservice['name'], vpnservice['name'])
self.assertEqual(self.vpnservice['description'],
vpnservice['description'])
self.assertEqual(self.vpnservice['router_id'], vpnservice['router_id'])
self.assertEqual(self.vpnservice['subnet_id'], vpnservice['subnet_id'])
self.assertEqual(self.vpnservice['tenant_id'], vpnservice['tenant_id'])
valid_status = ["ACTIVE", "DOWN", "BUILD", "ERROR", "PENDING_CREATE",
"PENDING_UPDATE", "PENDING_DELETE"]
self.assertIn(vpnservice['status'], valid_status)
@test.attr(type='smoke')
@test.idempotent_id('e0fb6200-da3d-4869-8340-a8c1956ca618')
def test_list_ike_policies(self):
# Verify the ike policy exists in the list of all IKE policies
body = self.client.list_ikepolicies()
ikepolicies = body['ikepolicies']
self.assertIn(self.ikepolicy['id'], [i['id'] for i in ikepolicies])
@test.attr(type='smoke')
@test.idempotent_id('d61f29a5-160c-487d-bc0d-22e32e731b44')
def test_create_update_delete_ike_policy(self):
# Creates a IKE policy
name = data_utils.rand_name('ike-policy')
body = (self.client.create_ikepolicy(
name=name,
ike_version="v1",
encryption_algorithm="aes-128",
auth_algorithm="sha1"))
ikepolicy = body['ikepolicy']
self.assertIsNotNone(ikepolicy['id'])
self.addCleanup(self._delete_ike_policy, ikepolicy['id'])
# Update IKE Policy
new_ike = {'name': data_utils.rand_name("New-IKE"),
'description': "Updated ike policy",
'encryption_algorithm': "aes-256",
'ike_version': "v2",
'pfs': "group14",
'lifetime': {'units': "seconds", 'value': 2000}}
self.client.update_ikepolicy(ikepolicy['id'], **new_ike)
# Confirm that update was successful by verifying using 'show'
body = self.client.show_ikepolicy(ikepolicy['id'])
ike_policy = body['ikepolicy']
for key, value in new_ike.iteritems():
self.assertIn(key, ike_policy)
self.assertEqual(value, ike_policy[key])
# Verification of ike policy delete
self.client.delete_ikepolicy(ikepolicy['id'])
body = self.client.list_ikepolicies()
ikepolicies = [ikp['id'] for ikp in body['ikepolicies']]
self.assertNotIn(ike_policy['id'], ikepolicies)
@test.attr(type='smoke')
@test.idempotent_id('b5fcf3a3-9407-452d-b8a8-e7c6c32baea8')
def test_show_ike_policy(self):
# Verifies the details of a ike policy
body = self.client.show_ikepolicy(self.ikepolicy['id'])
ikepolicy = body['ikepolicy']
self.assertEqual(self.ikepolicy['id'], ikepolicy['id'])
self.assertEqual(self.ikepolicy['name'], ikepolicy['name'])
self.assertEqual(self.ikepolicy['description'],
ikepolicy['description'])
self.assertEqual(self.ikepolicy['encryption_algorithm'],
ikepolicy['encryption_algorithm'])
self.assertEqual(self.ikepolicy['auth_algorithm'],
ikepolicy['auth_algorithm'])
self.assertEqual(self.ikepolicy['tenant_id'],
ikepolicy['tenant_id'])
self.assertEqual(self.ikepolicy['pfs'],
ikepolicy['pfs'])
self.assertEqual(self.ikepolicy['phase1_negotiation_mode'],
ikepolicy['phase1_negotiation_mode'])
self.assertEqual(self.ikepolicy['ike_version'],
ikepolicy['ike_version'])
@test.attr(type='smoke')
@test.idempotent_id('19ea0a2f-add9-44be-b732-ffd8a7b42f37')
def test_list_ipsec_policies(self):
# Verify the ipsec policy exists in the list of all ipsec policies
body = self.client.list_ipsecpolicies()
ipsecpolicies = body['ipsecpolicies']
self.assertIn(self.ipsecpolicy['id'], [i['id'] for i in ipsecpolicies])
    @test.attr(type='smoke')
    @test.idempotent_id('9c1701c9-329a-4e5d-930a-1ead1b3f86ad')
    def test_create_update_delete_ipsec_policy(self):
        """Full lifecycle of an IPsec policy: create, update, verify, delete."""
        # Creates an ipsec policy
        ipsec_policy_body = {'name': data_utils.rand_name('ipsec-policy'),
                             'pfs': 'group5',
                             'encryption_algorithm': "aes-128",
                             'auth_algorithm': 'sha1'}
        resp_body = self.client.create_ipsecpolicy(**ipsec_policy_body)
        ipsecpolicy = resp_body['ipsecpolicy']
        self.addCleanup(self._delete_ipsec_policy, ipsecpolicy['id'])
        self._assertExpected(ipsec_policy_body, ipsecpolicy)
        # Verification of ipsec policy update
        new_ipsec = {'description': 'Updated ipsec policy',
                     'pfs': 'group2',
                     'name': data_utils.rand_name("New-IPSec"),
                     'encryption_algorithm': "aes-256",
                     'lifetime': {'units': "seconds", 'value': '2000'}}
        body = self.client.update_ipsecpolicy(ipsecpolicy['id'],
                                              **new_ipsec)
        updated_ipsec_policy = body['ipsecpolicy']
        self._assertExpected(new_ipsec, updated_ipsec_policy)
        # Verification of ipsec policy delete
        self.client.delete_ipsecpolicy(ipsecpolicy['id'])
        # Deleting again must raise NotFound, proving the first delete worked.
        self.assertRaises(lib_exc.NotFound,
                          self.client.delete_ipsecpolicy, ipsecpolicy['id'])
@test.attr(type='smoke')
@test.idempotent_id('601f8a05-9d3c-4539-a400-1c4b3a21b03b')
def test_show_ipsec_policy(self):
# Verifies the details of an ipsec policy
body = self.client.show_ipsecpolicy(self.ipsecpolicy['id'])
ipsecpolicy = body['ipsecpolicy']
self._assertExpected(self.ipsecpolicy, ipsecpolicy) | unknown | codeparrot/codeparrot-clean | ||
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Ansible module to manage PaloAltoNetworks Firewall
# (c) 2016, techbizdev <techbizdev@paloaltonetworks.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: panos_lic
short_description: apply authcode to a device/instance
description:
- Apply an authcode to a device.
- The authcode should have been previously registered on the Palo Alto Networks support portal.
- The device should have Internet access.
author: "Luigi Mori (@jtschichold), Ivan Bojer (@ivanbojer)"
version_added: "2.3"
requirements:
- pan-python
options:
ip_address:
description:
- IP address (or hostname) of PAN-OS device
required: true
password:
description:
- password for authentication
required: true
username:
description:
- username for authentication
required: false
default: "admin"
auth_code:
description:
- authcode to be applied
required: true
force:
description:
- whether to apply authcode even if device is already licensed
required: false
default: "false"
'''
EXAMPLES = '''
- hosts: localhost
connection: local
tasks:
- name: fetch license
panos_lic:
ip_address: "192.168.1.1"
password: "paloalto"
auth_code: "IBADCODE"
register: result
- name: Display serialnumber (if already registered)
debug:
var: "{{result.serialnumber}}"
'''
RETURN = '''
serialnumber:
description: serialnumber of the device in case that it has been already registered
returned: success
type: string
sample: 007200004214
'''
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'version': '1.0'}
from ansible.module_utils.basic import AnsibleModule
try:
import pan.xapi
HAS_LIB = True
except ImportError:
HAS_LIB = False
def get_serial(xapi, module):
    """Return the device serial number reported by 'show system info'.

    Fails the module when the response carries no <serial> element.
    """
    xapi.op(cmd="show system info", cmd_xml=True)
    node = xapi.element_root.find('.//serial')
    if node is None:
        module.fail_json(msg="No <serial> tag in show system info")
    return node.text
def apply_authcode(xapi, module, auth_code):
    """Apply *auth_code* to the device via 'request license fetch auth-code'.

    PAN-OS drops the API connection after successfully licensing, so a
    PanXapiError whose response body contains 'Successfully' is actually
    the success case; an 'Invalid Auth Code' body fails the module.
    """
    licensing_cmd = 'request license fetch auth-code "%s"' % auth_code
    try:
        xapi.op(cmd=licensing_cmd, cmd_xml=True)
    except pan.xapi.PanXapiError:
        if hasattr(xapi, 'xml_document'):
            if 'Successfully' in xapi.xml_document:
                return
            if 'Invalid Auth Code' in xapi.xml_document:
                module.fail_json(msg="Invalid Auth Code")
        raise
def fetch_authcode(xapi, module):
    """Fetch licenses for authcodes already registered on the support portal.

    As with apply_authcode(), PAN-OS tears down the connection on success,
    so a PanXapiError whose body says 'Successfully' means it worked.
    """
    try:
        xapi.op(cmd='request license fetch', cmd_xml=True)
    except pan.xapi.PanXapiError:
        if hasattr(xapi, 'xml_document'):
            if 'Successfully' in xapi.xml_document:
                return
            if 'Invalid Auth Code' in xapi.xml_document:
                module.fail_json(msg="Invalid Auth Code")
        raise
def main():
    """Entry point: connect to the firewall and apply or fetch the license.

    Unless 'force' is set, a device that already reports a serial number is
    considered licensed and left untouched.
    """
    argument_spec = dict(
        ip_address=dict(required=True),
        password=dict(required=True, no_log=True),
        auth_code=dict(),
        username=dict(default='admin'),
        force=dict(type='bool', default=False)
    )
    module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=False)
    if not HAS_LIB:
        module.fail_json(msg='pan-python is required for this module')
    ip_address = module.params["ip_address"]
    password = module.params["password"]
    auth_code = module.params["auth_code"]
    force = module.params['force']
    username = module.params['username']
    xapi = pan.xapi.PanXapi(
        hostname=ip_address,
        api_username=username,
        api_password=password
    )
    if not force:
        # A device that already has a serial number is already registered;
        # report it unchanged.
        serialnumber = get_serial(xapi, module)
        if serialnumber != 'unknown':
            return module.exit_json(changed=False, serialnumber=serialnumber)
    # With an explicit authcode we apply it; otherwise fetch whatever codes
    # are already registered on the support portal for this device.
    if auth_code:
        apply_authcode(xapi, module, auth_code)
    else:
        fetch_authcode(xapi, module)
    module.exit_json(changed=True, msg="okey dokey")
{
"openFiles": ["src/app/app.ts", "src/app/app.html"],
"type": "editor",
"title": "Add form submission"
} | json | github | https://github.com/angular/angular | adev/src/content/tutorials/signal-forms/steps/5-add-submission/config.json |
"""Stuff that differs in different Python versions and platform
distributions."""
from __future__ import absolute_import, division
import os
import sys
from pip._vendor.six import text_type
try:
from logging.config import dictConfig as logging_dictConfig
except ImportError:
from pip.compat.dictconfig import dictConfig as logging_dictConfig
try:
import ipaddress
except ImportError:
try:
from pip._vendor import ipaddress
except ImportError:
import ipaddr as ipaddress
ipaddress.ip_address = ipaddress.IPAddress
ipaddress.ip_network = ipaddress.IPNetwork
__all__ = [
"logging_dictConfig", "ipaddress", "uses_pycache", "console_to_str",
"native_str", "get_path_uid", "stdlib_pkgs", "WINDOWS",
]
# importlib.util.cache_from_source is the supported API on Python >= 3.4;
# older interpreters fall back to the deprecated imp module. The hasattr
# probe matters because some implementations (e.g. older Jython) have no
# bytecode cache at all.
if sys.version_info >= (3, 4):
    uses_pycache = True
    from importlib.util import cache_from_source
else:
    import imp
    uses_pycache = hasattr(imp, 'cache_from_source')
    if uses_pycache:
        cache_from_source = imp.cache_from_source
    else:
        cache_from_source = None
if sys.version_info >= (3,):
    def console_to_str(s):
        # Decode subprocess output using the console's encoding, falling
        # back to UTF-8 when the bytes are not valid in that encoding.
        try:
            return s.decode(sys.__stdout__.encoding)
        except UnicodeDecodeError:
            return s.decode('utf_8')

    def native_str(s, replace=False):
        # On Python 3 the "native" string type is str: decode bytes,
        # optionally substituting undecodable bytes instead of raising.
        if isinstance(s, bytes):
            return s.decode('utf-8', 'replace' if replace else 'strict')
        return s

else:
    def console_to_str(s):
        # On Python 2 subprocess output is already a native (byte) string.
        return s

    def native_str(s, replace=False):
        # Replace is ignored -- unicode to UTF-8 can't fail
        if isinstance(s, text_type):
            return s.encode('utf-8')
        return s
def total_seconds(td):
    """Return the timedelta *td* expressed in seconds as a float.

    datetime.timedelta.total_seconds() only exists on Python >= 2.7, so on
    older interpreters compute it by hand (true division is in effect via
    the __future__ import at the top of this module).
    """
    try:
        return td.total_seconds()
    except AttributeError:
        microseconds = td.microseconds + (td.seconds + td.days * 24 * 3600) * 10 ** 6
        return microseconds / 10 ** 6
def get_path_uid(path):
    """
    Return path's uid.

    Does not follow symlinks:
        https://github.com/pypa/pip/pull/935#discussion_r5307003

    Placed this function in compat due to differences on AIX and
    Jython, that should eventually go away.

    :raises OSError: When path is a symlink or can't be read.
    """
    if hasattr(os, 'O_NOFOLLOW'):
        # Open with O_NOFOLLOW so a symlink fails atomically at the OS level.
        fd = os.open(path, os.O_RDONLY | os.O_NOFOLLOW)
        owner = os.fstat(fd).st_uid
        os.close(fd)
        return owner
    # AIX and Jython: no O_NOFOLLOW, so check-then-stat is the best we can
    # do (time-of-check/time-of-use vulnerability).
    if os.path.islink(path):
        # raise OSError for parity with os.O_NOFOLLOW above
        raise OSError(
            "%s is a symlink; Will not return uid for symlinks" % path
        )
    # older versions of Jython don't have `os.fstat`
    return os.stat(path).st_uid
# packages in the stdlib that may have installation metadata, but should not be
# considered 'installed'. this theoretically could be determined based on
# dist.location (py27:`sysconfig.get_paths()['stdlib']`,
# py26:sysconfig.get_config_vars('LIBDEST')), but fear platform variation may
# make this ineffective, so hard-coding
stdlib_pkgs = ['python', 'wsgiref']
if sys.version_info >= (2, 7):
stdlib_pkgs.extend(['argparse'])
# windows detection, covers cpython and ironpython
WINDOWS = (sys.platform.startswith("win") or
(sys.platform == 'cli' and os.name == 'nt')) | unknown | codeparrot/codeparrot-clean | ||
"""add target offices
Revision ID: 39b878094b02
Revises: f3a7c22264fe
Create Date: 2017-03-16 15:38:48.261365
"""
# revision identifiers, used by Alembic.
revision = '39b878094b02'
down_revision = 'f3a7c22264fe'
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
import sqlalchemy_utils
def upgrade():
    """Create the campaign_target_office table and add the target_offices
    column to campaign_campaign (batch mode for SQLite compatibility)."""
    ### commands auto generated by Alembic - please adjust! ###
    op.create_table('campaign_target_office',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('name', sa.String(length=100), nullable=True),
    sa.Column('address', sa.String(length=100), nullable=True),
    sa.Column('location', sa.String(length=100), nullable=True),
    sa.Column('number', sqlalchemy_utils.types.phone_number.PhoneNumberType(length=20), nullable=True),
    sa.Column('target_id', sa.Integer(), nullable=True),
    sa.ForeignKeyConstraint(['target_id'], ['campaign_target.id'], ),
    sa.PrimaryKeyConstraint('id')
    )
    with op.batch_alter_table(u'campaign_campaign', schema=None) as batch_op:
        batch_op.add_column(sa.Column('target_offices', sa.String(length=100), nullable=True))
    ### end Alembic commands ###
def downgrade():
    """Revert upgrade(): drop the target_offices column and the
    campaign_target_office table."""
    ### commands auto generated by Alembic - please adjust! ###
    with op.batch_alter_table(u'campaign_campaign', schema=None) as batch_op:
        batch_op.drop_column('target_offices')
    op.drop_table('campaign_target_office')
    ### end Alembic commands ###
export default function Categories({ categories }) {
return (
<span className="ml-1">
under
{categories.edges.length > 0 ? (
categories.edges.map((category, index) => (
<span key={index} className="ml-1">
{category.node.name}
</span>
))
) : (
<span className="ml-1">{categories.edges.node.name}</span>
)}
</span>
);
} | javascript | github | https://github.com/vercel/next.js | examples/cms-drupal/components/categories.js |
name: "\U0001F680 Feature request"
description: Submit a proposal/request for a new transformers feature
labels: [ "Feature request" ]
body:
- type: textarea
id: feature-request
validations:
required: true
attributes:
label: Feature request
description: |
A clear and concise description of the feature proposal. Please provide a link to the paper and code in case they exist.
- type: textarea
id: motivation
validations:
required: true
attributes:
label: Motivation
description: |
Please outline the motivation for the proposal. Is your feature request related to a problem? e.g., I'm always frustrated when [...]. If this is related to another GitHub issue, please link here too.
- type: textarea
id: contribution
validations:
required: true
attributes:
label: Your contribution
description: |
          Is there any way that you could help, e.g. by submitting a PR? Make sure to read the [contributing guidelines](https://github.com/huggingface/transformers/blob/main/CONTRIBUTING.md).
import urllib
import pytest
from mitmproxy.test import taddons
from mitmproxy.test import tflow
import mitmproxy.test.tutils
from mitmproxy.addons import serverplayback
from mitmproxy import options
from mitmproxy import exceptions
from mitmproxy import io
def tdump(path, flows):
    """Serialize *flows* to *path* in mitmproxy's flow dump format.

    The original left the file object dangling after writing; open it in a
    context manager so the handle is closed (and buffers are flushed)
    before the tests read the dump back.
    """
    with open(path, "wb") as f:
        w = io.FlowWriter(f)
        for i in flows:
            w.add(i)
def test_config(tmpdir):
    """server_replay accepts a flow file path but rejects a directory."""
    s = serverplayback.ServerPlayback()
    with taddons.context() as tctx:
        fpath = str(tmpdir.join("flows"))
        tdump(fpath, [tflow.tflow(resp=True)])
        tctx.configure(s, server_replay=[fpath])
        with pytest.raises(exceptions.OptionsError):
            # A directory is not a readable flow file.
            tctx.configure(s, server_replay=[str(tmpdir)])
def test_tick():
    """Once playback is stopped and the final flow is no longer live,
    tick() signals 'processing_complete' to the master."""
    s = serverplayback.ServerPlayback()
    with taddons.context() as tctx:
        s.stop = True
        s.final_flow = tflow.tflow()
        s.final_flow.live = False
        s.tick()
        assert tctx.master.has_event("processing_complete")
def test_server_playback():
    """Loading flows populates the flowmap; next_flow pops matches and
    clear() empties the map."""
    sp = serverplayback.ServerPlayback()
    sp.configure(options.Options(), [])
    f = tflow.tflow(resp=True)
    assert not sp.flowmap
    sp.load_flows([f])
    assert sp.flowmap
    assert sp.next_flow(f)
    # next_flow consumes the stored flow.
    assert not sp.flowmap
    sp.load_flows([f])
    assert sp.flowmap
    sp.clear()
    assert not sp.flowmap
def test_ignore_host():
    """With server_replay_ignore_host, the request host does not
    contribute to the replay hash."""
    sp = serverplayback.ServerPlayback()
    sp.configure(options.Options(server_replay_ignore_host=True), [])
    r = tflow.tflow(resp=True)
    r2 = tflow.tflow(resp=True)
    r.request.host = "address"
    r2.request.host = "address"
    assert sp._hash(r) == sp._hash(r2)
    # Differing hosts still hash equal because the host is ignored.
    r2.request.host = "wrong_address"
    assert sp._hash(r) == sp._hash(r2)
def test_ignore_content():
    """server_replay_ignore_content controls whether the request body
    participates in the replay hash."""
    s = serverplayback.ServerPlayback()
    s.configure(options.Options(server_replay_ignore_content=False), [])
    r = tflow.tflow(resp=True)
    r2 = tflow.tflow(resp=True)
    r.request.content = b"foo"
    r2.request.content = b"foo"
    assert s._hash(r) == s._hash(r2)
    r2.request.content = b"bar"
    assert not s._hash(r) == s._hash(r2)
    # With the option enabled, any body (even empty or missing) matches.
    s.configure(options.Options(server_replay_ignore_content=True), [])
    r = tflow.tflow(resp=True)
    r2 = tflow.tflow(resp=True)
    r.request.content = b"foo"
    r2.request.content = b"foo"
    assert s._hash(r) == s._hash(r2)
    r2.request.content = b"bar"
    assert s._hash(r) == s._hash(r2)
    r2.request.content = b""
    assert s._hash(r) == s._hash(r2)
    r2.request.content = None
    assert s._hash(r) == s._hash(r2)
def test_ignore_content_wins_over_params():
    """When both ignore_content and ignore_payload_params are set,
    ignoring the whole body takes precedence."""
    s = serverplayback.ServerPlayback()
    s.configure(
        options.Options(
            server_replay_ignore_content=True,
            server_replay_ignore_payload_params=[
                "param1", "param2"
            ]
        ),
        []
    )
    # NOTE: parameters are mutually exclusive in options
    r = tflow.tflow(resp=True)
    r.request.headers["Content-Type"] = "application/x-www-form-urlencoded"
    r.request.content = b"paramx=y"
    r2 = tflow.tflow(resp=True)
    r2.request.headers["Content-Type"] = "application/x-www-form-urlencoded"
    r2.request.content = b"paramx=x"
    # same parameters
    assert s._hash(r) == s._hash(r2)
def test_ignore_payload_params_other_content_type():
    """server_replay_ignore_payload_params only applies to urlencoded
    bodies; other content types compare the raw body byte-for-byte."""
    s = serverplayback.ServerPlayback()
    with taddons.context() as tctx:
        tctx.configure(
            s,
            server_replay_ignore_content=False,
            server_replay_ignore_payload_params=[
                "param1", "param2"
            ]
        )
        r = tflow.tflow(resp=True)
        r.request.headers["Content-Type"] = "application/json"
        r.request.content = b'{"param1":"1"}'
        r2 = tflow.tflow(resp=True)
        r2.request.headers["Content-Type"] = "application/json"
        r2.request.content = b'{"param1":"1"}'
        # same content
        assert s._hash(r) == s._hash(r2)
        # distinct content (note only x-www-form-urlencoded payload is analysed)
        r2.request.content = b'{"param1":"2"}'
        assert not s._hash(r) == s._hash(r2)
def test_hash():
    """Basic hash semantics: headers do not matter by default, the path
    and query string do."""
    s = serverplayback.ServerPlayback()
    s.configure(options.Options(), [])
    r = tflow.tflow()
    r2 = tflow.tflow()
    assert s._hash(r)
    assert s._hash(r) == s._hash(r2)
    # Headers are ignored unless server_replay_use_headers is set.
    r.request.headers["foo"] = "bar"
    assert s._hash(r) == s._hash(r2)
    r.request.path = "voing"
    assert s._hash(r) != s._hash(r2)
    # A query parameter with a blank value is distinct from no parameter.
    r.request.path = "path?blank_value"
    r2.request.path = "path?"
    assert s._hash(r) != s._hash(r2)
def test_headers():
    """server_replay_use_headers makes only the listed headers
    significant for matching."""
    s = serverplayback.ServerPlayback()
    s.configure(options.Options(server_replay_use_headers=["foo"]), [])
    r = tflow.tflow(resp=True)
    r.request.headers["foo"] = "bar"
    r2 = tflow.tflow(resp=True)
    assert not s._hash(r) == s._hash(r2)
    r2.request.headers["foo"] = "bar"
    assert s._hash(r) == s._hash(r2)
    # Headers outside the configured list are ignored.
    r2.request.headers["oink"] = "bar"
    assert s._hash(r) == s._hash(r2)
    r = tflow.tflow(resp=True)
    r2 = tflow.tflow(resp=True)
    assert s._hash(r) == s._hash(r2)
def test_load():
    """Flows that hash identically are replayed in load order (FIFO) and
    consumed as they match."""
    s = serverplayback.ServerPlayback()
    s.configure(options.Options(), [])
    r = tflow.tflow(resp=True)
    r.request.headers["key"] = "one"
    r2 = tflow.tflow(resp=True)
    r2.request.headers["key"] = "two"
    s.load_flows([r, r2])
    assert s.count() == 2
    # Both flows hash identically (headers are ignored by default), so
    # they are returned in load order.
    n = s.next_flow(r)
    assert n.request.headers["key"] == "one"
    assert s.count() == 1
    n = s.next_flow(r)
    assert n.request.headers["key"] == "two"
    assert not s.flowmap
    assert s.count() == 0
    assert not s.next_flow(r)
def test_load_with_server_replay_nopop():
    """With server_replay_nopop, matching a flow does not consume it."""
    s = serverplayback.ServerPlayback()
    s.configure(options.Options(server_replay_nopop=True), [])
    r = tflow.tflow(resp=True)
    r.request.headers["key"] = "one"
    r2 = tflow.tflow(resp=True)
    r2.request.headers["key"] = "two"
    s.load_flows([r, r2])
    assert s.count() == 2
    s.next_flow(r)
    # The matched flow stays available for replaying again.
    assert s.count() == 2
def test_ignore_params():
    """Query parameters listed in server_replay_ignore_params are dropped
    from the hash; any other parameter still distinguishes requests."""
    s = serverplayback.ServerPlayback()
    s.configure(
        options.Options(
            server_replay_ignore_params=["param1", "param2"]
        ),
        []
    )
    r = tflow.tflow(resp=True)
    r.request.path = "/test?param1=1"
    r2 = tflow.tflow(resp=True)
    r2.request.path = "/test"
    assert s._hash(r) == s._hash(r2)
    r2.request.path = "/test?param1=2"
    assert s._hash(r) == s._hash(r2)
    r2.request.path = "/test?param2=1"
    assert s._hash(r) == s._hash(r2)
    # param3 is not in the ignore list, so it breaks the match.
    r2.request.path = "/test?param3=2"
    assert not s._hash(r) == s._hash(r2)
def thash(r, r2, setter):
    """Shared helper: drive a payload-parameter matrix through *setter*
    (which encodes kwargs into the request body) and check which
    combinations hash equal under server_replay_ignore_payload_params."""
    s = serverplayback.ServerPlayback()
    s.configure(
        options.Options(
            server_replay_ignore_payload_params=["param1", "param2"]
        ),
        []
    )
    setter(r, paramx="x", param1="1")
    setter(r2, paramx="x", param1="1")
    # same parameters
    assert s._hash(r) == s._hash(r2)
    # ignored parameters !=
    setter(r2, paramx="x", param1="2")
    assert s._hash(r) == s._hash(r2)
    # missing parameter
    setter(r2, paramx="x")
    assert s._hash(r) == s._hash(r2)
    # ignorable parameter added
    setter(r2, paramx="x", param1="2")
    assert s._hash(r) == s._hash(r2)
    # not ignorable parameter changed
    setter(r2, paramx="y", param1="1")
    assert not s._hash(r) == s._hash(r2)
    # not ignorable parameter missing
    setter(r2, param1="1")
    r2.request.content = b"param1=1"
    assert not s._hash(r) == s._hash(r2)
def test_ignore_payload_params():
    """Exercise thash() with both urlencoded and multipart body encoders."""
    def urlencode_setter(r, **kwargs):
        # Encode kwargs as an application/x-www-form-urlencoded body.
        r.request.content = urllib.parse.urlencode(kwargs).encode()
    r = tflow.tflow(resp=True)
    r.request.headers["Content-Type"] = "application/x-www-form-urlencoded"
    r2 = tflow.tflow(resp=True)
    r2.request.headers["Content-Type"] = "application/x-www-form-urlencoded"
    thash(r, r2, urlencode_setter)
    boundary = 'somefancyboundary'
    def multipart_setter(r, **kwargs):
        # Encode kwargs as a multipart/form-data body with a fixed boundary.
        b = "--{0}\n".format(boundary)
        parts = []
        for k, v in kwargs.items():
            parts.append(
                "Content-Disposition: form-data; name=\"%s\"\n\n"
                "%s\n" % (k, v)
            )
        c = b + b.join(parts) + b
        r.request.content = c.encode()
        r.request.headers["content-type"] = 'multipart/form-data; boundary=' +\
            boundary
    r = tflow.tflow(resp=True)
    r2 = tflow.tflow(resp=True)
    thash(r, r2, multipart_setter)
def test_server_playback_full():
    """End-to-end replay: matched requests get the recorded response,
    unmatched ones don't, and exhausting the recording sets the stop flag.
    """
    s = serverplayback.ServerPlayback()
    with taddons.context() as tctx:
        tctx.configure(
            s,
            refresh_server_playback = True,
        )
        f = tflow.tflow()
        f.response = mitmproxy.test.tutils.tresp(content=f.request.content)
        s.load_flows([f, f])
        tf = tflow.tflow()
        assert not tf.response
        s.request(tf)
        assert tf.response == f.response
        # A request with a different body does not match any recorded flow.
        tf = tflow.tflow()
        tf.request.content = b"gibble"
        assert not tf.response
        s.request(tf)
        assert not tf.response
        assert not s.stop
        s.tick()
        assert not s.stop
        # Consuming the last recorded flow sets the stop flag. (The
        # original also created an unused tflow here; removed.)
        s.request(tflow.tflow())
        assert s.stop
def test_server_playback_kill():
    """With replay_kill_extra, requests that match no recorded flow are
    killed instead of being forwarded upstream."""
    s = serverplayback.ServerPlayback()
    with taddons.context() as tctx:
        tctx.configure(
            s,
            refresh_server_playback = True,
            replay_kill_extra=True
        )
        f = tflow.tflow()
        f.response = mitmproxy.test.tutils.tresp(content=f.request.content)
        s.load_flows([f])
        f = tflow.tflow()
        f.request.host = "nonexistent"
        tctx.cycle(s, f)
        assert f.reply.value == exceptions.Kill
from typing import TYPE_CHECKING, Any
from langchain_classic._api import create_importer
if TYPE_CHECKING:
from langchain_community.vectorstores import ScaNN
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {"ScaNN": "langchain_community.vectorstores"}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
    """Look up attributes dynamically.

    Module-level ``__getattr__`` (PEP 562): resolves deprecated names such
    as ``ScaNN`` via the importer created above, which emits the
    deprecation warning and loads the new location.
    """
    return _import_attribute(name)
__all__ = [
"ScaNN",
] | python | github | https://github.com/langchain-ai/langchain | libs/langchain/langchain_classic/vectorstores/scann.py |
#!/usr/bin/env python
#############################################################################
##
## Copyright (C) 2016 The Qt Company Ltd.
## Contact: https://www.qt.io/licensing/
##
## This file is part of the QtWebEngine module of the Qt Toolkit.
##
## $QT_BEGIN_LICENSE:GPL-EXCEPT$
## Commercial License Usage
## Licensees holding valid commercial Qt licenses may use this file in
## accordance with the commercial license agreement provided with the
## Software or, alternatively, in accordance with the terms contained in
## a written agreement between you and The Qt Company. For licensing terms
## and conditions see https://www.qt.io/terms-conditions. For further
## information use the contact form at https://www.qt.io/contact-us.
##
## GNU General Public License Usage
## Alternatively, this file may be used under the terms of the GNU
## General Public License version 3 as published by the Free Software
## Foundation with exceptions as appearing in the file LICENSE.GPL3-EXCEPT
## included in the packaging of this file. Please review the following
## information to ensure the GNU General Public License requirements will
## be met: https://www.gnu.org/licenses/gpl-3.0.html.
##
## $QT_END_LICENSE$
##
#############################################################################
import glob
import os
import subprocess
import sys
import string
import argparse
qtwebengine_root = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..'))
import git_submodule as GitSubmodule
import version_resolver as resolver
chromium_src = os.environ.get('CHROMIUM_SRC_DIR')
ninja_src = os.path.join(qtwebengine_root, 'src/3rdparty_upstream/ninja')
gn_src = os.path.join(qtwebengine_root, 'src/3rdparty_upstream/gn')
use_external_chromium = False
parser = argparse.ArgumentParser(description='Initialize QtWebEngine repository.')
parser.add_argument('--baseline-upstream', action='store_true', help='initialize using upstream Chromium submodule w/o applying patches (for maintenance purposes only)')
group = parser.add_mutually_exclusive_group()
group.add_argument('-u', '--upstream', action='store_true', help='initialize using upstream Chromium submodule')
group.add_argument('-s', '--snapshot', action='store_true', help='initialize using flat Chromium snapshot submodule (default)')
args = parser.parse_args()
if args.baseline_upstream:
args.upstream = True
if chromium_src:
chromium_src = os.path.abspath(chromium_src)
use_external_chromium = True
if not chromium_src or not os.path.isdir(chromium_src):
if args.upstream:
chromium_src = os.path.join(qtwebengine_root, 'src/3rdparty_upstream/chromium')
if args.snapshot or not chromium_src:
chromium_src = os.path.join(qtwebengine_root, 'src/3rdparty/chromium')
ninja_src = os.path.join(qtwebengine_root, 'src/3rdparty/ninja')
gn_src = os.path.join(qtwebengine_root, 'src/3rdparty/gn')
args.snapshot = True
print 'CHROMIUM_SRC_DIR not set, using Chromium in' + chromium_src
if not args.baseline_upstream:
# Write our chromium sources directory into git config.
relative_chromium_src = os.path.relpath(chromium_src, qtwebengine_root)
subprocess.call(['git', 'config', 'qtwebengine.chromiumsrcdir', relative_chromium_src])
def updateLastChange():
    """Regenerate Chromium's LASTCHANGE stamp headers after a checkout.

    No-op when building against an external (CHROMIUM_SRC_DIR) checkout,
    which is assumed to manage its own stamps.
    """
    if use_external_chromium:
        return
    currentDir = os.getcwd()
    os.chdir(chromium_src)
    print 'updating LASTCHANGE files'
    subprocess.call(['python', 'build/util/lastchange.py', '-o', 'build/util/LASTCHANGE'])
    subprocess.call(['python', 'build/util/lastchange.py', '-m', 'SKIA_COMMIT_HASH', '-s', 'third_party/skia', '--header', 'skia/ext/skia_commit_hash.h'])
    subprocess.call(['python', 'build/util/lastchange.py', '-m', 'GPU_LISTS_VERSION', '--revision-id-only', '--header', 'gpu/config/gpu_lists_version.h'])
    os.chdir(currentDir)
def initUpstreamSubmodules():
    """Register and check out the upstream ninja, gn and chromium submodules.

    Versions are pinned through version_resolver; the chromium submodule is
    skipped entirely when an external CHROMIUM_SRC_DIR checkout is in use.
    The submodules are unstaged afterwards so they cannot be committed by
    accident.
    """
    gn_url = 'https://gn.googlesource.com/gn'
    ninja_url = 'https://github.com/martine/ninja.git'
    chromium_url = 'https://chromium.googlesource.com/chromium/src.git'
    ninja_shasum = 'refs/tags/' + resolver.currentNinjaVersion()
    chromium_ref = 'refs/tags/' + resolver.currentVersion()
    os.chdir(qtwebengine_root)
    # Only add submodules that are not registered yet.
    current_submodules = subprocess.check_output(['git', 'submodule'])
    if not 'src/3rdparty_upstream/gn' in current_submodules:
        subprocess.call(['git', 'submodule', 'add', gn_url, 'src/3rdparty_upstream/gn'])
    if not 'src/3rdparty_upstream/ninja' in current_submodules:
        subprocess.call(['git', 'submodule', 'add', ninja_url, 'src/3rdparty_upstream/ninja'])
    if not use_external_chromium and not 'src/3rdparty_upstream/chromium' in current_submodules:
        subprocess.call(['git', 'submodule', 'add', chromium_url, 'src/3rdparty_upstream/chromium'])
    ninjaSubmodule = GitSubmodule.Submodule()
    ninjaSubmodule.path = 'src/3rdparty_upstream/ninja'
    ninjaSubmodule.ref = ninja_shasum
    ninjaSubmodule.url = ninja_url
    ninjaSubmodule.os = 'all'
    ninjaSubmodule.initialize()
    gnSubmodule = GitSubmodule.Submodule()
    gnSubmodule.path = 'src/3rdparty_upstream/gn'
    gnSubmodule.ref = 'master'
    gnSubmodule.url = gn_url
    gnSubmodule.os = 'all'
    gnSubmodule.initialize()
    if not use_external_chromium:
        chromiumSubmodule = GitSubmodule.Submodule()
        chromiumSubmodule.path = 'src/3rdparty_upstream/chromium'
        chromiumSubmodule.ref = chromium_ref
        chromiumSubmodule.url = chromium_url
        chromiumSubmodule.os = 'all'
        chromiumSubmodule.initialize()
        chromiumSubmodule.initSubmodules()
    # Unstage repositories so we do not accidentally commit them.
    subprocess.call(['git', 'reset', '-q', 'HEAD', 'src/3rdparty_upstream/gn'])
    subprocess.call(['git', 'reset', '-q', 'HEAD', 'src/3rdparty_upstream/ninja'])
    subprocess.call(['git', 'reset', '-q', 'HEAD', 'src/3rdparty_upstream/chromium'])
def initSnapshot():
    """Initialize the flat src/3rdparty snapshot submodule."""
    snapshot = GitSubmodule.Submodule()
    snapshot.path = 'src/3rdparty'
    snapshot.os = 'all'
    snapshot.initialize()
# Script entry: check out sources according to --upstream/--snapshot, then
# patch the upstream checkout unless in baseline mode or using an external
# tree.
os.chdir(qtwebengine_root)
if args.upstream:
    initUpstreamSubmodules()
    updateLastChange()
    if not args.baseline_upstream and not use_external_chromium:
        # Apply Qt's patches on top of the vanilla upstream checkout.
        subprocess.call(['python', os.path.join(qtwebengine_root, 'tools', 'scripts', 'patch_upstream.py')])
if args.snapshot:
    initSnapshot()
# The contents of this file are subject to the Mozilla Public License
# Version 2.0 (the "License"); you may not use this file except in
# compliance with the License. You may obtain a copy of the License at
# http://www.mozilla.org/MPL/
#
# Software distributed under the License is distributed on an "AS IS"basis,
# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
# for the specific language governing rights and limitations under the
# License.
#
# OS2Webscanner was developed by Magenta in collaboration with OS2 the
# Danish community of open source municipalities (http://www.os2web.dk/).
#
# The code is currently governed by OS2 the Danish community of open
# source municipalities ( http://www.os2web.dk/ )
"""Regular expression-based rules."""
import logging
import re
import regex
from .cpr import CPRRule
from .rule import Rule
from ..items import MatchItem
class RegexRule(Rule):
    """Represents a rule which matches using a regular expression.

    The rule's patterns are compounded into a single OR'ed regular
    expression.  A CPR-number scan can optionally be enabled in addition
    to (or instead of) the regex scan.
    """

    def __init__(self, name, pattern_strings, sensitivity, cpr_enabled=False, ignore_irrelevant=False,
                 do_modulus11=False, *args, **kwargs):
        """Initialize the rule.

        The sensitivity is used to assign a sensitivity value to matches.

        :param name: human-readable name of the rule.
        :param pattern_strings: QuerySet of pattern objects, each exposing
            a ``pattern_string`` attribute.
        :param sensitivity: sensitivity value assigned to matches.
        :param cpr_enabled: whether to also scan for CPR numbers.
        :param ignore_irrelevant: forwarded to the CPR rule; only
            meaningful when ``cpr_enabled`` is True.
        :param do_modulus11: forwarded to the CPR rule; only meaningful
            when ``cpr_enabled`` is True.
        """
        super().__init__(*args, **kwargs)
        # Convert QuerySet to list
        self.regex_patterns = list(pattern_strings.all())
        self.name = name
        self.sensitivity = sensitivity
        self.cpr_enabled = cpr_enabled
        self.ignore_irrelevant = ignore_irrelevant
        self.do_modulus11 = do_modulus11
        self.regex_str = ''

        if not self._is_cpr_only():
            logging.info('------- Regex patterns ---------')
            for pattern in self.regex_patterns:
                logging.info(pattern.pattern_string)
            logging.info('-----------------------------\n')
            self.regex_str = self.compund_rules()
            self.regex = regex.compile(self.regex_str, regex.DOTALL)

        # 'do_modulus11' and 'ignore_irrelevant' are CPR options; force them
        # off whenever CPR scanning is disabled.
        if not cpr_enabled:
            self.do_modulus11 = cpr_enabled
            self.ignore_irrelevant = cpr_enabled

    def __str__(self):
        """Return a string representation of this rule.

        Note: the 'cpr_enabled' field reports _is_cpr_only(), i.e. whether
        the rule is a pure CPR scan, not the raw cpr_enabled flag.
        """
        return '{\n\tname: ' + self.name + \
               ',\n\tregex: ' + self.regex_str + \
               ',\n\tcpr_enabled: ' + str(self._is_cpr_only()) + \
               ',\n\tsensitivity: ' + str(self.sensitivity) + '\n}'

    def compund_rules(self):
        """Compound all the regex patterns into one OR'ed regex.

        E.g. a rule set of {pattern1, pattern2, pattern3} becomes
        '(pattern1|pattern2|pattern3)'.  Duplicate pattern strings are
        removed while preserving first-seen order; the previous
        implementation popped from a ``set``, which made the order of the
        alternation non-deterministic between runs, and printed the result
        to stdout instead of logging it.

        (The misspelled method name is kept for backwards compatibility
        with existing callers.)

        :return: the compound pattern string, or None if the rule has no
            patterns.
        """
        unique_patterns = list(dict.fromkeys(
            p.pattern_string for p in self.regex_patterns))
        if not unique_patterns:
            return None
        if len(unique_patterns) == 1:
            return unique_patterns[0]
        compound_rule = '(' + '|'.join(unique_patterns) + ')'
        logging.debug('Compound regex rule: %s', compound_rule)
        return compound_rule

    def execute(self, text):
        """Execute the rule on the text.

        :param text: the text to scan.
        :return: a set of MatchItem objects.
        """
        matches = set()
        if self._is_cpr_only():
            # Pure CPR scan; there are no regex patterns to apply.
            cpr_rule = CPRRule(self.do_modulus11, self.ignore_irrelevant, whitelist=None)
            temp_matches = cpr_rule.execute(text)
            matches.update(temp_matches)
        else:
            re_matches = self.regex.finditer(text)
            if self.cpr_enabled:
                cpr_rule = CPRRule(self.do_modulus11, self.ignore_irrelevant, whitelist=None)
                matches.update(cpr_rule.execute(text))

            for match in re_matches:
                matched_data = match.group(0)
                if len(matched_data) > 1024:
                    # TODO: Get rid of magic number
                    matched_data = match.group(1)
                matches.add(MatchItem(matched_data=matched_data,
                                      sensitivity=self.sensitivity))
        return matches

    def is_all_match(self, matches):
        """Check whether every pattern is matched by the provided matches.

        :param matches: set of match dicts as produced by execute().
        :return: True if all regex patterns (and, when CPR scanning is
            enabled, a CPR number) were matched, otherwise False.
        """
        if not isinstance(matches, set):
            return False

        # NOTE(review): self.cpr_pattern is not defined by this class; it is
        # presumably provided by the Rule base class or set elsewhere --
        # verify before relying on the CPR branches below.
        cpr_match = False
        # If it turns out that we're only doing a cpr scan then scan for the
        # first match and return true.
        if self._is_cpr_only():
            for match in matches:
                if re.match(self.cpr_pattern, match['original_matched_data']):
                    return True
        else:
            # Treat the remaining patterns as a countdown: each observed
            # match removes one (arbitrary) pattern until none are left.
            regex_patterns = set(self.regex_patterns)
            for pattern in self.regex_patterns:
                for match in matches:
                    if re.match(pattern.pattern_string, match['matched_data']) and regex_patterns:
                        regex_patterns.pop()
                        continue
                    if self.cpr_enabled and not cpr_match and 'original_matched_data' in match:
                        if re.match(self.cpr_pattern, match['original_matched_data']):
                            cpr_match = True
                if not regex_patterns:
                    break

            if not self.cpr_enabled:
                return not regex_patterns
            else:
                return not regex_patterns and cpr_match

    def _is_cpr_only(self):
        """Return True if we are only doing a CPR scan (no regex patterns)."""
        return self.cpr_enabled and len(self.regex_patterns) <= 0
# -*- coding: utf-8 -*-
from nose.tools import (
assert_in, assert_is_none, assert_equals, assert_raises, assert_not_equals
)
from django.test import TestCase
from student.tests.factories import UserFactory
from verify_student.models import SoftwareSecurePhotoVerification, VerificationException
class TestPhotoVerification(TestCase):

    def test_state_transitions(self):
        """Verify the allowed status transitions of a photo verification.

        The expected lifecycle is::

            created → ready → submitted → approved
                                  ↑     ↓
                                  → denied
        """
        user = UserFactory.create()
        verification = SoftwareSecurePhotoVerification(user=user)

        # A fresh attempt starts out in the "created" state.
        assert_equals(verification.status, SoftwareSecurePhotoVerification.STATUS.created)
        assert_equals(verification.status, "created")

        # mark_ready() must fail while the required photo fields are empty.
        assert_raises(VerificationException, verification.mark_ready)

        # Every later transition is also invalid from the "created" state.
        assert_raises(VerificationException, verification.submit)
        assert_raises(VerificationException, verification.approve)
        assert_raises(VerificationException, verification.deny)

        # Fill in the photo URLs so mark_ready() can succeed.
        verification.face_image_url = "http://fake.edx.org/face.jpg"
        verification.photo_id_image_url = "http://fake.edx.org/photo_id.jpg"
        verification.mark_ready()
        assert_equals(verification.name, user.profile.name)  # Move this to another test
        assert_equals(verification.status, "ready")

        # Approval and denial stay invalid until the attempt is submitted --
        # i.e. the user has clicked through whatever agreements, payment, or
        # other steps the application requires before processing.
        assert_raises(VerificationException, verification.approve)
        assert_raises(VerificationException, verification.deny)

        # Submit the attempt.
        verification.submit()
        assert_equals(verification.status, "submitted")

        # From "submitted", both approval and denial are legal.
        verification.approve()
        assert_equals(verification.status, "approved")

        verification.deny("Could not read name on Photo ID")
        assert_equals(verification.status, "denied")
// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build dragonfly || freebsd || linux || netbsd || openbsd || solaris
package main
import (
"cmd/internal/buildid"
"cmd/internal/hash"
"cmd/link/internal/ld"
"debug/elf"
"encoding/binary"
"fmt"
"internal/platform"
"internal/testenv"
"os"
"os/exec"
"path/filepath"
"runtime"
"strings"
"sync"
"testing"
"text/template"
"unsafe"
)
// getCCAndCCFLAGS returns the C compiler and its flags as the go tool
// reports them ("go env CC" / "go env GOGCCFLAGS") when run with env.
func getCCAndCCFLAGS(t *testing.T, env []string) (string, []string) {
	goTool := testenv.GoToolPath(t)
	cmd := testenv.Command(t, goTool, "env", "CC")
	cmd.Env = env
	ccb, err := cmd.Output()
	if err != nil {
		t.Fatal(err)
	}
	cc := strings.TrimSpace(string(ccb))
	cmd = testenv.Command(t, goTool, "env", "GOGCCFLAGS")
	cmd.Env = env
	cflagsb, err := cmd.Output()
	if err != nil {
		t.Fatal(err)
	}
	cflags := strings.Fields(string(cflagsb))
	return cc, cflags
}
// asmSource is assembly defining two distinct text sections, .text1 and
// .text2; TestSectionsWithSameName renames one so the names collide.
var asmSource = `
	.section .text1,"ax"
s1:
	.byte 0
	.section .text2,"ax"
s2:
	.byte 0
`

// goSource is a minimal Go main package.
var goSource = `
package main
func main() {}
`

// goSourceWithData is a minimal Go main package with a global variable,
// guaranteeing a writable data section in the output binary.
var goSourceWithData = `
package main
var globalVar = 42
func main() { println(&globalVar) }
`
// The linker used to crash if an ELF input file had multiple text sections
// with the same name.
func TestSectionsWithSameName(t *testing.T) {
	testenv.MustHaveGoBuild(t)
	testenv.MustHaveCGO(t)
	t.Parallel()

	objcopy, err := exec.LookPath("objcopy")
	if err != nil {
		t.Skipf("can't find objcopy: %v", err)
	}

	dir := t.TempDir()

	gopath := filepath.Join(dir, "GOPATH")
	gopathEnv := "GOPATH=" + gopath
	env := append(os.Environ(), gopathEnv)

	if err := os.WriteFile(filepath.Join(dir, "go.mod"), []byte("module elf_test\n"), 0666); err != nil {
		t.Fatal(err)
	}

	asmFile := filepath.Join(dir, "x.s")
	if err := os.WriteFile(asmFile, []byte(asmSource), 0444); err != nil {
		t.Fatal(err)
	}

	// Assemble the source with the system C compiler.
	cc, cflags := getCCAndCCFLAGS(t, env)

	asmObj := filepath.Join(dir, "x.o")
	t.Logf("%s %v -c -o %s %s", cc, cflags, asmObj, asmFile)
	if out, err := testenv.Command(t, cc, append(cflags, "-c", "-o", asmObj, asmFile)...).CombinedOutput(); err != nil {
		t.Logf("%s", out)
		t.Fatal(err)
	}

	// Rename .text2 to .text1 so the object contains two sections named .text1.
	asm2Obj := filepath.Join(dir, "x2.syso")
	t.Logf("%s --rename-section .text2=.text1 %s %s", objcopy, asmObj, asm2Obj)
	if out, err := testenv.Command(t, objcopy, "--rename-section", ".text2=.text1", asmObj, asm2Obj).CombinedOutput(); err != nil {
		t.Logf("%s", out)
		t.Fatal(err)
	}

	for _, s := range []string{asmFile, asmObj} {
		if err := os.Remove(s); err != nil {
			t.Fatal(err)
		}
	}

	goFile := filepath.Join(dir, "main.go")
	if err := os.WriteFile(goFile, []byte(goSource), 0444); err != nil {
		t.Fatal(err)
	}

	// The build picks up the .syso object; it must link without crashing.
	cmd := goCmd(t, "build")
	cmd.Dir = dir
	cmd.Env = append(cmd.Env, gopathEnv)
	t.Logf("%s build", testenv.GoToolPath(t))
	if out, err := cmd.CombinedOutput(); err != nil {
		t.Logf("%s", out)
		t.Fatal(err)
	}
}
// cSources35779 holds two C files that each define a file-local (static)
// function with the same name; see TestMinusRSymsWithSameName.
var cSources35779 = []string{`
static int blah() { return 42; }
int Cfunc1() { return blah(); }
`, `
static int blah() { return 42; }
int Cfunc2() { return blah(); }
`,
}
// TestMinusRSymsWithSameName tests a corner case in the new
// loader. Prior to the fix this failed with the error 'loadelf:
// $WORK/b001/_pkg_.a(ldr.syso): duplicate symbol reference: blah in
// both main(.text) and main(.text)'. See issue #35779.
func TestMinusRSymsWithSameName(t *testing.T) {
	testenv.MustHaveGoBuild(t)
	testenv.MustHaveCGO(t)
	t.Parallel()

	dir := t.TempDir()

	gopath := filepath.Join(dir, "GOPATH")
	gopathEnv := "GOPATH=" + gopath
	env := append(os.Environ(), gopathEnv)

	if err := os.WriteFile(filepath.Join(dir, "go.mod"), []byte("module elf_test\n"), 0666); err != nil {
		t.Fatal(err)
	}

	cc, cflags := getCCAndCCFLAGS(t, env)

	// Compile each C file, producing two objects that both contain a
	// local symbol named "blah".
	objs := []string{}
	csrcs := []string{}
	for i, content := range cSources35779 {
		csrcFile := filepath.Join(dir, fmt.Sprintf("x%d.c", i))
		csrcs = append(csrcs, csrcFile)
		if err := os.WriteFile(csrcFile, []byte(content), 0444); err != nil {
			t.Fatal(err)
		}
		obj := filepath.Join(dir, fmt.Sprintf("x%d.o", i))
		objs = append(objs, obj)
		t.Logf("%s %v -c -o %s %s", cc, cflags, obj, csrcFile)
		if out, err := testenv.Command(t, cc, append(cflags, "-c", "-o", obj, csrcFile)...).CombinedOutput(); err != nil {
			t.Logf("%s", out)
			t.Fatal(err)
		}
	}

	// Link the two objects into a single relocatable (-r) object.
	sysoObj := filepath.Join(dir, "ldr.syso")
	t.Logf("%s %v -nostdlib -r -o %s %v", cc, cflags, sysoObj, objs)
	if out, err := testenv.Command(t, cc, append(cflags, "-nostdlib", "-r", "-o", sysoObj, objs[0], objs[1])...).CombinedOutput(); err != nil {
		t.Logf("%s", out)
		t.Fatal(err)
	}

	cruft := [][]string{objs, csrcs}
	for _, sl := range cruft {
		for _, s := range sl {
			if err := os.Remove(s); err != nil {
				t.Fatal(err)
			}
		}
	}

	goFile := filepath.Join(dir, "main.go")
	if err := os.WriteFile(goFile, []byte(goSource), 0444); err != nil {
		t.Fatal(err)
	}

	t.Logf("%s build", testenv.GoToolPath(t))
	cmd := goCmd(t, "build")
	cmd.Dir = dir
	cmd.Env = append(cmd.Env, gopathEnv)
	if out, err := cmd.CombinedOutput(); err != nil {
		t.Logf("%s", out)
		t.Fatal(err)
	}
}
// TestGNUBuildID checks that the -B linker flag controls the GNU build ID
// note in the resulting binary: default/gobuildid derive it from the Go
// build ID, an explicit hex value is used verbatim, and "none" omits it.
func TestGNUBuildID(t *testing.T) {
	testenv.MustHaveGoBuild(t)
	t.Parallel()

	tmpdir := t.TempDir()
	goFile := filepath.Join(tmpdir, "notes.go")
	if err := os.WriteFile(goFile, []byte(goSource), 0444); err != nil {
		t.Fatal(err)
	}

	// Use a specific Go buildid for testing.
	const gobuildid = "testbuildid"
	h := hash.Sum32([]byte(gobuildid))
	gobuildidHash := string(h[:20])

	tests := []struct{ name, ldflags, expect string }{
		{"default", "", gobuildidHash},
		{"gobuildid", "-B=gobuildid", gobuildidHash},
		{"specific", "-B=0x0123456789abcdef", "\x01\x23\x45\x67\x89\xab\xcd\xef"},
		{"none", "-B=none", ""},
	}
	if testenv.HasCGO() && runtime.GOOS != "solaris" && runtime.GOOS != "illumos" {
		// Solaris ld doesn't support --build-id. So we don't
		// add it in external linking mode.
		for _, test := range tests {
			t1 := test
			t1.name += "_external"
			t1.ldflags += " -linkmode=external"
			tests = append(tests, t1)
		}
	}
	for _, test := range tests {
		t.Run(test.name, func(t *testing.T) {
			exe := filepath.Join(tmpdir, test.name)
			cmd := goCmd(t, "build", "-ldflags=-buildid="+gobuildid+" "+test.ldflags, "-o", exe, goFile)
			if out, err := cmd.CombinedOutput(); err != nil {
				t.Fatalf("%v: %v:\n%s", cmd.Args, err, out)
			}
			gnuBuildID, err := buildid.ReadELFNote(exe, string(ld.ELF_NOTE_BUILDINFO_NAME), ld.ELF_NOTE_BUILDINFO_TAG)
			if err != nil {
				t.Fatalf("can't read GNU build ID")
			}
			if string(gnuBuildID) != test.expect {
				t.Errorf("build id mismatch: got %x, want %x", gnuBuildID, test.expect)
			}
		})
	}
}
// TestMergeNoteSections checks that both the GNU and Go build ID note
// sections are present and that they occupy the expected number of
// PT_NOTE segments for the target OS.
func TestMergeNoteSections(t *testing.T) {
	testenv.MustHaveGoBuild(t)
	expected := 1

	switch runtime.GOOS {
	case "linux", "dragonfly":
	case "openbsd", "netbsd", "freebsd":
		// These OSes require independent segment
		expected = 2
	default:
		t.Skip("We should only test on elf output.")
	}
	t.Parallel()

	goFile := filepath.Join(t.TempDir(), "notes.go")
	if err := os.WriteFile(goFile, []byte(goSource), 0444); err != nil {
		t.Fatal(err)
	}
	outFile := filepath.Join(t.TempDir(), "notes.exe")

	// sha1sum of "gopher"
	id := "0xf4e8cd51ce8bae2996dc3b74639cdeaa1f7fee5f"
	cmd := goCmd(t, "build", "-o", outFile, "-ldflags", "-B "+id, goFile)
	cmd.Dir = t.TempDir()
	if out, err := cmd.CombinedOutput(); err != nil {
		t.Logf("%s", out)
		t.Fatal(err)
	}

	ef, err := elf.Open(outFile)
	if err != nil {
		t.Fatalf("open elf file failed:%v", err)
	}
	defer ef.Close()
	sec := ef.Section(".note.gnu.build-id")
	if sec == nil {
		t.Fatalf("can't find gnu build id")
	}

	sec = ef.Section(".note.go.buildid")
	if sec == nil {
		t.Fatalf("can't find go build id")
	}
	cnt := 0
	for _, ph := range ef.Progs {
		if ph.Type == elf.PT_NOTE {
			cnt += 1
		}
	}
	if cnt != expected {
		t.Fatalf("want %d PT_NOTE segment, got %d", expected, cnt)
	}
}
// pieSourceTemplate generates a Go program with many distinct array types,
// forcing type descriptors into the .data.rel.ro section; see TestPIESize.
const pieSourceTemplate = `
package main
import "fmt"
// Force the creation of a lot of type descriptors that will go into
// the .data.rel.ro section.
{{range $index, $element := .}}var V{{$index}} interface{} = [{{$index}}]int{}
{{end}}
func main() {
{{range $index, $element := .}}	fmt.Println(V{{$index}})
{{end}}
}
`
// TestPIESize builds the same program as a regular executable and as PIE
// and verifies the PIE binary is not larger than the expected PIE-specific
// overhead (dynamic sections, .got/.plt, text size difference) allows.
func TestPIESize(t *testing.T) {
	testenv.MustHaveGoBuild(t)

	// We don't want to test -linkmode=external if cgo is not supported.
	// On some systems -buildmode=pie implies -linkmode=external, so just
	// always skip the test if cgo is not supported.
	testenv.MustHaveCGO(t)

	if !platform.BuildModeSupported(runtime.Compiler, "pie", runtime.GOOS, runtime.GOARCH) {
		t.Skip("-buildmode=pie not supported")
	}

	t.Parallel()

	tmpl := template.Must(template.New("pie").Parse(pieSourceTemplate))

	writeGo := func(t *testing.T, dir string) {
		f, err := os.Create(filepath.Join(dir, "pie.go"))
		if err != nil {
			t.Fatal(err)
		}
		// Passing a 100-element slice here will cause
		// pieSourceTemplate to create 100 variables with
		// different types.
		if err := tmpl.Execute(f, make([]byte, 100)); err != nil {
			t.Fatal(err)
		}
		if err := f.Close(); err != nil {
			t.Fatal(err)
		}
	}

	// Exercise both internal and external linking where supported.
	var linkmodes []string
	if platform.InternalLinkPIESupported(runtime.GOOS, runtime.GOARCH) {
		linkmodes = append(linkmodes, "internal")
	}
	linkmodes = append(linkmodes, "external")

	for _, linkmode := range linkmodes {
		t.Run(fmt.Sprintf("TestPieSize-%v", linkmode), func(t *testing.T) {
			t.Parallel()

			dir := t.TempDir()

			writeGo(t, dir)

			binexe := filepath.Join(dir, "exe")
			binpie := filepath.Join(dir, "pie")
			binexe += linkmode
			binpie += linkmode

			build := func(bin, mode string) error {
				cmd := goCmd(t, "build", "-o", bin, "-buildmode="+mode, "-ldflags=-linkmode="+linkmode)
				cmd.Args = append(cmd.Args, "pie.go")
				cmd.Dir = dir
				t.Logf("%v", cmd.Args)
				out, err := cmd.CombinedOutput()
				if len(out) > 0 {
					t.Logf("%s", out)
				}
				if err != nil {
					t.Log(err)
				}
				return err
			}

			// Build the exe and pie binaries concurrently.
			var errexe, errpie error
			var wg sync.WaitGroup
			wg.Add(2)
			go func() {
				defer wg.Done()
				errexe = build(binexe, "exe")
			}()
			go func() {
				defer wg.Done()
				errpie = build(binpie, "pie")
			}()
			wg.Wait()
			if errexe != nil || errpie != nil {
				if runtime.GOOS == "android" && runtime.GOARCH == "arm64" {
					testenv.SkipFlaky(t, 58806)
				}
				t.Fatal("link failed")
			}

			var sizeexe, sizepie uint64
			if fi, err := os.Stat(binexe); err != nil {
				t.Fatal(err)
			} else {
				sizeexe = uint64(fi.Size())
			}
			if fi, err := os.Stat(binpie); err != nil {
				t.Fatal(err)
			} else {
				sizepie = uint64(fi.Size())
			}

			elfexe, err := elf.Open(binexe)
			if err != nil {
				t.Fatal(err)
			}
			defer elfexe.Close()

			elfpie, err := elf.Open(binpie)
			if err != nil {
				t.Fatal(err)
			}
			defer elfpie.Close()

			// The difference in size between exe and PIE
			// should be approximately the difference in
			// size of the .text section plus the size of
			// the PIE dynamic data sections plus the
			// difference in size of the .got and .plt
			// sections if they exist.
			// We ignore unallocated sections.
			// There may be gaps between non-writeable and
			// writable PT_LOAD segments. We also skip those
			// gaps (see issue #36023).

			textsize := func(ef *elf.File, name string) uint64 {
				for _, s := range ef.Sections {
					if s.Name == ".text" {
						return s.Size
					}
				}
				t.Fatalf("%s: no .text section", name)
				return 0
			}
			textexe := textsize(elfexe, binexe)
			textpie := textsize(elfpie, binpie)

			dynsize := func(ef *elf.File) uint64 {
				var ret uint64
				for _, s := range ef.Sections {
					if s.Flags&elf.SHF_ALLOC == 0 {
						continue
					}
					switch s.Type {
					case elf.SHT_DYNSYM, elf.SHT_STRTAB, elf.SHT_REL, elf.SHT_RELA, elf.SHT_HASH, elf.SHT_GNU_HASH, elf.SHT_GNU_VERDEF, elf.SHT_GNU_VERNEED, elf.SHT_GNU_VERSYM:
						ret += s.Size
					}
					if s.Flags&elf.SHF_WRITE != 0 && (strings.Contains(s.Name, ".got") || strings.Contains(s.Name, ".plt")) {
						ret += s.Size
					}
				}
				return ret
			}

			dynexe := dynsize(elfexe)
			dynpie := dynsize(elfpie)

			extrasize := func(ef *elf.File) uint64 {
				var ret uint64
				// skip unallocated sections
				for _, s := range ef.Sections {
					if s.Flags&elf.SHF_ALLOC == 0 {
						ret += s.Size
					}
				}
				// also skip gaps between PT_LOAD segments
				var prev *elf.Prog
				for _, seg := range ef.Progs {
					if seg.Type != elf.PT_LOAD {
						continue
					}
					if prev != nil {
						ret += seg.Off - prev.Off - prev.Filesz
					}
					prev = seg
				}
				return ret
			}

			extraexe := extrasize(elfexe)
			extrapie := extrasize(elfpie)

			if sizepie < sizeexe || sizepie-extrapie < sizeexe-extraexe {
				return
			}
			diffReal := (sizepie - extrapie) - (sizeexe - extraexe)
			diffExpected := (textpie + dynpie) - (textexe + dynexe)
			t.Logf("real size difference %#x, expected %#x", diffReal, diffExpected)
			if diffReal > (diffExpected + diffExpected/10) {
				t.Errorf("PIE unexpectedly large: got difference of %d (%d - %d), expected difference %d", diffReal, sizepie, sizeexe, diffExpected)
			}
		})
	}
}
// TestIssue51939 checks that sections without the SHF_ALLOC flag are not
// assigned virtual addresses (issue #51939).
func TestIssue51939(t *testing.T) {
	testenv.MustHaveGoBuild(t)
	t.Parallel()
	td := t.TempDir()
	goFile := filepath.Join(td, "issue51939.go")
	if err := os.WriteFile(goFile, []byte(goSource), 0444); err != nil {
		t.Fatal(err)
	}
	outFile := filepath.Join(td, "issue51939.exe")
	cmd := goCmd(t, "build", "-o", outFile, goFile)
	if out, err := cmd.CombinedOutput(); err != nil {
		t.Logf("%s", out)
		t.Fatal(err)
	}

	ef, err := elf.Open(outFile)
	if err != nil {
		t.Fatal(err)
	}

	for _, s := range ef.Sections {
		if s.Flags&elf.SHF_ALLOC == 0 && s.Addr != 0 {
			t.Errorf("section %s should not allocated with addr %x", s.Name, s.Addr)
		}
	}
}
func TestFlagR(t *testing.T) {
	// Test that using the -R flag to specify a (large) alignment generates
	// a working binary.
	// (Test only on ELF for now. The alignment allowed differs from platform
	// to platform.)
	testenv.MustHaveGoBuild(t)
	t.Parallel()
	tmpdir := t.TempDir()
	src := filepath.Join(tmpdir, "x.go")
	if err := os.WriteFile(src, []byte(goSource), 0444); err != nil {
		t.Fatal(err)
	}
	exe := filepath.Join(tmpdir, "x.exe")

	// Build with a 1 MiB rounding quantum, then run the result.
	cmd := goCmd(t, "build", "-ldflags=-R=0x100000", "-o", exe, src)
	if out, err := cmd.CombinedOutput(); err != nil {
		t.Fatalf("build failed: %v, output:\n%s", err, out)
	}

	cmd = testenv.Command(t, exe)
	if out, err := cmd.CombinedOutput(); err != nil {
		t.Errorf("executable failed to run: %v\n%s", err, out)
	}
}
func TestFlagD(t *testing.T) {
	// Test that using the -D flag to specify data section address generates
	// a working binary with data at the specified address.
	t.Parallel()
	testFlagD(t, "0x10000000", "", 0x10000000)
}
func TestFlagDUnaligned(t *testing.T) {
	// Test that using the -D flag with an unaligned address errors out
	t.Parallel()
	testFlagDError(t, "0x10000123", "", "invalid -D value 0x10000123")
}
func TestFlagDWithR(t *testing.T) {
	// Test that using the -D flag with -R flag errors on unaligned address.
	t.Parallel()
	testFlagDError(t, "0x30001234", "8192", "invalid -D value 0x30001234")
}
// testFlagD builds goSourceWithData with -D=dataAddr (plus -R=roundQuantum
// when non-empty), runs the result, and verifies the lowest-addressed
// writable data section starts at expectedAddr.
func testFlagD(t *testing.T, dataAddr string, roundQuantum string, expectedAddr uint64) {
	testenv.MustHaveGoBuild(t)
	tmpdir := t.TempDir()
	src := filepath.Join(tmpdir, "x.go")
	if err := os.WriteFile(src, []byte(goSourceWithData), 0444); err != nil {
		t.Fatal(err)
	}
	exe := filepath.Join(tmpdir, "x.exe")

	// Build linker flags
	ldflags := "-D=" + dataAddr
	if roundQuantum != "" {
		ldflags += " -R=" + roundQuantum
	}

	cmd := goCmd(t, "build", "-ldflags="+ldflags, "-o", exe, src)
	if out, err := cmd.CombinedOutput(); err != nil {
		t.Fatalf("build failed: %v, output:\n%s", err, out)
	}

	cmd = testenv.Command(t, exe)
	if out, err := cmd.CombinedOutput(); err != nil {
		t.Errorf("executable failed to run: %v\n%s", err, out)
	}

	ef, err := elf.Open(exe)
	if err != nil {
		t.Fatalf("open elf file failed: %v", err)
	}
	defer ef.Close()

	// Find the first data-related section to verify segment placement
	var firstDataSection *elf.Section
	for _, sec := range ef.Sections {
		if sec.Type == elf.SHT_PROGBITS || sec.Type == elf.SHT_NOBITS {
			// These sections are writable, allocated at runtime, but not executable
			// nor TLS.
			isWrite := sec.Flags&elf.SHF_WRITE != 0
			isExec := sec.Flags&elf.SHF_EXECINSTR != 0
			isAlloc := sec.Flags&elf.SHF_ALLOC != 0
			isTLS := sec.Flags&elf.SHF_TLS != 0
			if isWrite && !isExec && isAlloc && !isTLS {
				if firstDataSection == nil || sec.Addr < firstDataSection.Addr {
					firstDataSection = sec
				}
			}
		}
	}
	if firstDataSection == nil {
		t.Fatalf("can't find any writable data sections")
	}
	if firstDataSection.Addr != expectedAddr {
		t.Errorf("data section starts at 0x%x for section %s, expected 0x%x",
			firstDataSection.Addr, firstDataSection.Name, expectedAddr)
	}
}
// testFlagDError builds with -D=dataAddr (plus -R=roundQuantum when
// non-empty), expects the link to fail, and checks that the linker's
// output contains expectedError.
func testFlagDError(t *testing.T, dataAddr string, roundQuantum string, expectedError string) {
	testenv.MustHaveGoBuild(t)
	tmpdir := t.TempDir()
	src := filepath.Join(tmpdir, "x.go")
	if err := os.WriteFile(src, []byte(goSourceWithData), 0444); err != nil {
		t.Fatal(err)
	}
	exe := filepath.Join(tmpdir, "x.exe")

	// Build linker flags
	ldflags := "-D=" + dataAddr
	if roundQuantum != "" {
		ldflags += " -R=" + roundQuantum
	}

	cmd := goCmd(t, "build", "-ldflags="+ldflags, "-o", exe, src)
	out, err := cmd.CombinedOutput()
	if err == nil {
		t.Fatalf("expected build to fail with unaligned data address, but it succeeded")
	}
	if !strings.Contains(string(out), expectedError) {
		t.Errorf("expected error message to contain %q, got:\n%s", expectedError, out)
	}
}
// TestELFHeadersSorted runs testELFHeadersSorted for both the "exe" and
// "pie" build modes.
func TestELFHeadersSorted(t *testing.T) {
	for _, buildmode := range []string{"exe", "pie"} {
		t.Run(buildmode, func(t *testing.T) {
			testELFHeadersSorted(t, buildmode)
		})
	}
}
// testELFHeadersSorted builds a binary with the internal linker and checks
// that section header 0 is all zeroes and that the remaining headers list
// allocated sections (sorted by address) before unallocated ones.
func testELFHeadersSorted(t *testing.T, buildmode string) {
	testenv.MustHaveGoBuild(t)
	// We can only test this for internal linking mode.
	// For external linking the external linker will
	// decide how to sort the sections.
	testenv.MustInternalLink(t, testenv.NoSpecialBuildTypes)
	if buildmode == "pie" {
		testenv.MustInternalLinkPIE(t)
	}
	t.Parallel()

	tmpdir := t.TempDir()
	src := filepath.Join(tmpdir, "x.go")
	if err := os.WriteFile(src, []byte(goSourceWithData), 0o444); err != nil {
		t.Fatal(err)
	}
	exe := filepath.Join(tmpdir, "x.exe")
	cmd := goCmd(t, "build", "-buildmode="+buildmode, "-ldflags=-linkmode=internal", "-o", exe, src)
	if out, err := cmd.CombinedOutput(); err != nil {
		t.Fatalf("build failed: %v, output:\n%s", err, out)
	}

	// Check that the first section header is all zeroes.
	f, err := os.Open(exe)
	if err != nil {
		t.Fatal(err)
	}
	defer f.Close()

	var ident [elf.EI_NIDENT]byte
	if _, err := f.Read(ident[:]); err != nil {
		t.Fatal(err)
	}

	// Determine byte order and ELF class from the ident bytes so we can
	// read the section header table offset directly from the file header.
	var bo binary.ByteOrder
	switch elf.Data(ident[elf.EI_DATA]) {
	case elf.ELFDATA2LSB:
		bo = binary.LittleEndian
	case elf.ELFDATA2MSB:
		bo = binary.BigEndian
	default:
		t.Fatalf("unrecognized data encoding %d", ident[elf.EI_DATA])
	}

	var shoff int64
	var shsize int
	switch elf.Class(ident[elf.EI_CLASS]) {
	case elf.ELFCLASS32:
		var hdr elf.Header32
		data := make([]byte, unsafe.Sizeof(hdr))
		if _, err := f.ReadAt(data, 0); err != nil {
			t.Fatal(err)
		}
		shoff = int64(bo.Uint32(data[unsafe.Offsetof(hdr.Shoff):]))
		shsize = int(unsafe.Sizeof(elf.Section32{}))
	case elf.ELFCLASS64:
		var hdr elf.Header64
		data := make([]byte, unsafe.Sizeof(hdr))
		if _, err := f.ReadAt(data, 0); err != nil {
			t.Fatal(err)
		}
		shoff = int64(bo.Uint64(data[unsafe.Offsetof(hdr.Shoff):]))
		shsize = int(unsafe.Sizeof(elf.Section64{}))
	default:
		t.Fatalf("unrecognized class %d", ident[elf.EI_CLASS])
	}

	if shoff > 0 {
		data := make([]byte, shsize)
		if _, err := f.ReadAt(data, shoff); err != nil {
			t.Fatal(err)
		}
		for i, c := range data {
			if c != 0 {
				t.Errorf("section header 0 byte %d is %d, should be zero", i, c)
			}
		}
	}

	ef, err := elf.NewFile(f)
	if err != nil {
		t.Fatal(err)
	}
	defer ef.Close()

	// After the first zero section header,
	// we should see allocated sections,
	// then unallocated sections.
	// The allocated sections should be sorted by address.
	i := 1
	lastAddr := uint64(0)
	for i < len(ef.Sections) {
		sec := ef.Sections[i]
		if sec.Flags&elf.SHF_ALLOC == 0 {
			break
		}
		if sec.Addr < lastAddr {
			t.Errorf("section %d %q address %#x less than previous address %#x", i, sec.Name, sec.Addr, lastAddr)
		}
		lastAddr = sec.Addr
		i++
	}
	firstUnalc := i
	for i < len(ef.Sections) {
		sec := ef.Sections[i]
		if sec.Flags&elf.SHF_ALLOC != 0 {
			t.Errorf("allocated section %d %q follows first unallocated section %d %q", i, sec.Name, firstUnalc, ef.Sections[firstUnalc].Name)
		}
		i++
	}
}
/* MIT License
*
* Copyright (c) 2023 Brad House
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
* SPDX-License-Identifier: MIT
*/
#include "ares_private.h"
#ifdef HAVE_NETINET_IN_H
# include <netinet/in.h>
#endif
#ifdef HAVE_NETDB_H
# include <netdb.h>
#endif
/* Build a struct hostent from a parsed DNS PTR response.
 *
 * dnsrec  - parsed DNS response record
 * addr    - optional binary address, copied into h_addr_list[0]
 * addrlen - length of addr in bytes
 * family  - address family stored into h_addrtype
 * host    - on ARES_SUCCESS receives the allocated hostent; the caller
 *           must free it with ares_free_hostent()
 */
ares_status_t ares_parse_ptr_reply_dnsrec(const ares_dns_record_t *dnsrec,
                                          const void *addr, int addrlen,
                                          int family, struct hostent **host)
{
  ares_status_t   status;
  size_t          ptrcount = 0;
  struct hostent *hostent  = NULL;
  const char     *hostname = NULL;
  const char     *ptrname  = NULL;
  size_t          i;
  size_t          ancount;

  *host = NULL;

  /* Fetch name from query as we will use it to compare later on. Old code
   * did this check, so we'll retain it. */
  status = ares_dns_record_query_get(dnsrec, 0, &ptrname, NULL, NULL);
  if (status != ARES_SUCCESS) {
    goto done;
  }

  ancount = ares_dns_record_rr_cnt(dnsrec, ARES_SECTION_ANSWER);
  if (ancount == 0) {
    status = ARES_ENODATA;
    goto done;
  }

  /* Response structure */
  hostent = ares_malloc(sizeof(*hostent));
  if (hostent == NULL) {
    status = ARES_ENOMEM;
    goto done;
  }

  memset(hostent, 0, sizeof(*hostent));

  /* Room for exactly one address plus the NULL terminator. */
  hostent->h_addr_list = ares_malloc(2 * sizeof(*hostent->h_addr_list));
  if (hostent->h_addr_list == NULL) {
    status = ARES_ENOMEM;
    goto done;
  }
  memset(hostent->h_addr_list, 0, 2 * sizeof(*hostent->h_addr_list));

  if (addr != NULL && addrlen > 0) {
    hostent->h_addr_list[0] = ares_malloc((size_t)addrlen);
    if (hostent->h_addr_list[0] == NULL) {
      status = ARES_ENOMEM;
      goto done;
    }
    memcpy(hostent->h_addr_list[0], addr, (size_t)addrlen);
  }

  hostent->h_addrtype = (HOSTENT_ADDRTYPE_TYPE)family;
  hostent->h_length   = (HOSTENT_LENGTH_TYPE)addrlen;

  /* Preallocate the maximum number + 1 */
  hostent->h_aliases = ares_malloc((ancount + 1) * sizeof(*hostent->h_aliases));
  if (hostent->h_aliases == NULL) {
    status = ARES_ENOMEM;
    goto done;
  }
  memset(hostent->h_aliases, 0, (ancount + 1) * sizeof(*hostent->h_aliases));

  /* Cycle through answers */
  for (i = 0; i < ancount; i++) {
    const ares_dns_rr_t *rr =
      ares_dns_record_rr_get_const(dnsrec, ARES_SECTION_ANSWER, i);

    if (rr == NULL) {
      /* Shouldn't be possible */
      status = ARES_EBADRESP; /* LCOV_EXCL_LINE: DefensiveCoding */
      goto done;              /* LCOV_EXCL_LINE: DefensiveCoding */
    }

    if (ares_dns_rr_get_class(rr) != ARES_CLASS_IN) {
      continue;
    }

    /* Any time we see a CNAME, replace our ptrname with its value */
    if (ares_dns_rr_get_type(rr) == ARES_REC_TYPE_CNAME) {
      ptrname = ares_dns_rr_get_str(rr, ARES_RR_CNAME_CNAME);
      if (ptrname == NULL) {
        status = ARES_EBADRESP; /* LCOV_EXCL_LINE: DefensiveCoding */
        goto done;              /* LCOV_EXCL_LINE: DefensiveCoding */
      }
    }

    /* Handling for PTR records below this, otherwise skip */
    if (ares_dns_rr_get_type(rr) != ARES_REC_TYPE_PTR) {
      continue;
    }

    /* Issue #683
     * Old code compared the name in the rr to the ptrname, but I think this
     * is wrong since it was proven wrong for A & AAAA records. Leaving
     * this code commented out for future reference
     *
     * rname = ares_dns_rr_get_name(rr);
     * if (rname == NULL) {
     *   status = ARES_EBADRESP;
     *   goto done;
     * }
     * if (!ares_strcaseeq(ptrname, rname)) {
     *   continue;
     * }
     */

    /* Save most recent PTR record as the hostname */
    hostname = ares_dns_rr_get_str(rr, ARES_RR_PTR_DNAME);
    if (hostname == NULL) {
      status = ARES_EBADRESP; /* LCOV_EXCL_LINE: DefensiveCoding */
      goto done;              /* LCOV_EXCL_LINE: DefensiveCoding */
    }

    /* Append as an alias */
    hostent->h_aliases[ptrcount] = ares_strdup(hostname);
    if (hostent->h_aliases[ptrcount] == NULL) {
      status = ARES_ENOMEM;
      goto done;
    }
    ptrcount++;
  }

  if (ptrcount == 0) {
    status = ARES_ENODATA;
    goto done;
  } else {
    status = ARES_SUCCESS;
  }

  /* Fill in hostname */
  hostent->h_name = ares_strdup(hostname);
  if (hostent->h_name == NULL) {
    status = ARES_ENOMEM; /* LCOV_EXCL_LINE: OutOfMemory */
    goto done;            /* LCOV_EXCL_LINE: OutOfMemory */
  }

done:
  if (status != ARES_SUCCESS) {
    ares_free_hostent(hostent);
    /* Compatibility */
    if (status == ARES_EBADNAME) {
      status = ARES_EBADRESP;
    }
  } else {
    *host = hostent;
  }
  return status;
}
int ares_parse_ptr_reply(const unsigned char *abuf, int alen_int,
const void *addr, int addrlen, int family,
struct hostent **host)
{
size_t alen;
ares_dns_record_t *dnsrec = NULL;
ares_status_t status;
if (alen_int < 0) {
return ARES_EBADRESP;
}
alen = (size_t)alen_int;
status = ares_dns_parse(abuf, alen, 0, &dnsrec);
if (status != ARES_SUCCESS) {
goto done;
}
status = ares_parse_ptr_reply_dnsrec(dnsrec, addr, addrlen, family, host);
done:
ares_dns_record_destroy(dnsrec);
if (status == ARES_EBADNAME) {
status = ARES_EBADRESP;
}
return (int)status;
} | c | github | https://github.com/nodejs/node | deps/cares/src/lib/legacy/ares_parse_ptr_reply.c |
# -*- coding: utf-8 -*-
import sys
sys.path[0:0] = [""]
import bson
import os
import pickle
import unittest
import uuid
import weakref
from datetime import datetime
from bson import DBRef, ObjectId
from tests import fixtures
from tests.fixtures import (PickleEmbedded, PickleTest, PickleSignalsTest,
PickleDyanmicEmbedded, PickleDynamicTest)
from mongoengine import *
from mongoengine.errors import (NotRegistered, InvalidDocumentError,
InvalidQueryError, NotUniqueError,
FieldDoesNotExist, SaveConditionError)
from mongoengine.queryset import NULLIFY, Q
from mongoengine.connection import get_db
from mongoengine.base import get_document
from mongoengine.context_managers import switch_db, query_counter
from mongoengine import signals
TEST_IMAGE_PATH = os.path.join(os.path.dirname(__file__),
'../fields/mongoengine.png')
__all__ = ("InstanceTest",)
class InstanceTest(unittest.TestCase):
    def setUp(self):
        """Connect to the test database and define the shared fixtures.

        Defines ``self.Person`` (a Document with inheritance enabled) and
        ``self.Job`` (an embedded document) used throughout the test case.
        """
        connect(db='mongoenginetest')
        self.db = get_db()
        class Job(EmbeddedDocument):
            name = StringField()
            years = IntField()
        class Person(Document):
            name = StringField()
            age = IntField()
            job = EmbeddedDocumentField(Job)
            # Deliberately a plain class attribute, not a mongoengine field.
            non_field = True
            meta = {"allow_inheritance": True}
        self.Person = Person
        self.Job = Job
def tearDown(self):
for collection in self.db.collection_names():
if 'system.' in collection:
continue
self.db.drop_collection(collection)
    def assertDbEqual(self, docs):
        """Assert the raw Person collection contains exactly *docs*.

        NOTE(review): the db query sorts on ``"id"`` while the expected
        list is sorted on ``"_id"``; raw documents store the pk under
        ``"_id"``, so the db-side sort key looks wrong -- confirm the
        ordering is actually deterministic for multi-document inputs.
        """
        self.assertEqual(
            list(self.Person._get_collection().find().sort("id")),
            sorted(docs, key=lambda doc: doc["_id"]))
def assertHasInstance(self, field, instance):
self.assertTrue(hasattr(field, "_instance"))
self.assertTrue(field._instance is not None)
if isinstance(field._instance, weakref.ProxyType):
self.assertTrue(field._instance.__eq__(instance))
else:
self.assertEqual(field._instance, instance)
    def test_capped_collection(self):
        """Ensure that capped collections work properly.
        """
        class Log(Document):
            date = DateTimeField(default=datetime.now)
            meta = {
                'max_documents': 10,
                'max_size': 4096,
            }
        Log.drop_collection()
        # Ensure that the collection handles up to its maximum
        for _ in range(10):
            Log().save()
        self.assertEqual(Log.objects.count(), 10)
        # Check that extra documents don't increase the size
        # (capped collections evict the oldest entry instead of growing).
        Log().save()
        self.assertEqual(Log.objects.count(), 10)
        options = Log.objects._collection.options()
        self.assertEqual(options['capped'], True)
        self.assertEqual(options['max'], 10)
        self.assertEqual(options['size'], 4096)
        # Check that the document cannot be redefined with different options
        def recreate_log_document():
            class Log(Document):
                date = DateTimeField(default=datetime.now)
                meta = {
                    'max_documents': 11,
                }
            # Create the collection by accessing Document.objects
            Log.objects
        self.assertRaises(InvalidCollectionError, recreate_log_document)
        Log.drop_collection()
    def test_capped_collection_default(self):
        """Ensure that capped collections defaults work properly.
        """
        class Log(Document):
            date = DateTimeField(default=datetime.now)
            meta = {
                'max_documents': 10,
            }
        Log.drop_collection()
        # Create a doc to create the collection
        Log().save()
        options = Log.objects._collection.options()
        self.assertEqual(options['capped'], True)
        self.assertEqual(options['max'], 10)
        # When max_size is omitted mongoengine falls back to 10 MB.
        self.assertEqual(options['size'], 10 * 2**20)
        # Check that the document with default value can be recreated
        # without raising InvalidCollectionError.
        def recreate_log_document():
            class Log(Document):
                date = DateTimeField(default=datetime.now)
                meta = {
                    'max_documents': 10,
                }
            # Create the collection by accessing Document.objects
            Log.objects
        recreate_log_document()
        Log.drop_collection()
    def test_capped_collection_no_max_size_problems(self):
        """Ensure that capped collections with odd max_size work properly.
        MongoDB rounds up max_size to next multiple of 256, recreating a doc
        with the same spec failed in mongoengine <0.10
        """
        class Log(Document):
            date = DateTimeField(default=datetime.now)
            meta = {
                'max_size': 10000,
            }
        Log.drop_collection()
        # Create a doc to create the collection
        Log().save()
        options = Log.objects._collection.options()
        self.assertEqual(options['capped'], True)
        # >= rather than ==: the server rounds 10000 up to a multiple of 256.
        self.assertTrue(options['size'] >= 10000)
        # Check that the document with odd max_size value can be recreated
        def recreate_log_document():
            class Log(Document):
                date = DateTimeField(default=datetime.now)
                meta = {
                    'max_size': 10000,
                }
            # Create the collection by accessing Document.objects
            Log.objects
        recreate_log_document()
        Log.drop_collection()
    def test_repr(self):
        """Ensure that unicode representation works
        """
        class Article(Document):
            # Python 2: __unicode__ is used by mongoengine's default __repr__.
            def __unicode__(self):
                return self.title
            title = StringField()
        doc = Article(title=u'привет мир')
        self.assertEqual('<Article: привет мир>', repr(doc))
    def test_repr_none(self):
        """Ensure None values handled correctly
        """
        class Article(Document):
            title = StringField()
            # A __str__ returning None must not crash repr().
            def __str__(self):
                return None
        doc = Article(title=u'привет мир')
        self.assertEqual('<Article: None>', repr(doc))
    def test_queryset_resurrects_dropped_collection(self):
        """Querying after drop_collection must transparently recreate it."""
        self.Person.drop_collection()
        self.assertEqual([], list(self.Person.objects()))
        class Actor(self.Person):
            pass
        # Ensure works correctly with inhertited classes
        Actor.objects()
        self.Person.drop_collection()
        self.assertEqual([], list(Actor.objects()))
    def test_polymorphic_references(self):
        """Ensure that the correct subclasses are returned from a query when
        using references / generic references
        """
        class Animal(Document):
            meta = {'allow_inheritance': True}
        class Fish(Animal):
            pass
        class Mammal(Animal):
            pass
        class Dog(Mammal):
            pass
        class Human(Mammal):
            pass
        class Zoo(Document):
            animals = ListField(ReferenceField(Animal))
        Zoo.drop_collection()
        Animal.drop_collection()
        Animal().save()
        Fish().save()
        Mammal().save()
        Dog().save()
        Human().save()
        # Save a reference to each animal
        zoo = Zoo(animals=Animal.objects)
        zoo.save()
        zoo.reload()
        # Dereferencing must reconstruct each concrete subclass.
        classes = [a.__class__ for a in Zoo.objects.first().animals]
        self.assertEqual(classes, [Animal, Fish, Mammal, Dog, Human])
        Zoo.drop_collection()
        # Repeat with a GenericReferenceField instead of ReferenceField.
        class Zoo(Document):
            animals = ListField(GenericReferenceField(Animal))
        # Save a reference to each animal
        zoo = Zoo(animals=Animal.objects)
        zoo.save()
        zoo.reload()
        classes = [a.__class__ for a in Zoo.objects.first().animals]
        self.assertEqual(classes, [Animal, Fish, Mammal, Dog, Human])
        Zoo.drop_collection()
        Animal.drop_collection()
    def test_reference_inheritance(self):
        """References to documents with inheritance disabled round-trip."""
        class Stats(Document):
            created = DateTimeField(default=datetime.now)
            meta = {'allow_inheritance': False}
        class CompareStats(Document):
            generated = DateTimeField(default=datetime.now)
            stats = ListField(ReferenceField(Stats))
        Stats.drop_collection()
        CompareStats.drop_collection()
        list_stats = []
        for i in xrange(10):
            s = Stats()
            s.save()
            list_stats.append(s)
        cmp_stats = CompareStats(stats=list_stats)
        cmp_stats.save()
        # Order of the referenced list must be preserved on reload.
        self.assertEqual(list_stats, CompareStats.objects.first().stats)
    def test_db_field_load(self):
        """Ensure we load data correctly
        """
        class Person(Document):
            name = StringField(required=True)
            # Stored under "rank" in mongo, exposed as _rank on the model.
            _rank = StringField(required=False, db_field="rank")
            @property
            def rank(self):
                return self._rank or "Private"
        Person.drop_collection()
        Person(name="Jack", _rank="Corporal").save()
        Person(name="Fred").save()
        self.assertEqual(Person.objects.get(name="Jack").rank, "Corporal")
        self.assertEqual(Person.objects.get(name="Fred").rank, "Private")
    def test_db_embedded_doc_field_load(self):
        """Ensure we load embedded document data correctly
        """
        class Rank(EmbeddedDocument):
            title = StringField(required=True)
        class Person(Document):
            name = StringField(required=True)
            # Stored under "rank" in mongo, exposed as rank_ on the model.
            rank_ = EmbeddedDocumentField(Rank,
                                          required=False,
                                          db_field='rank')
            @property
            def rank(self):
                if self.rank_ is None:
                    return "Private"
                return self.rank_.title
        Person.drop_collection()
        Person(name="Jack", rank_=Rank(title="Corporal")).save()
        Person(name="Fred").save()
        self.assertEqual(Person.objects.get(name="Jack").rank, "Corporal")
        self.assertEqual(Person.objects.get(name="Fred").rank, "Private")
def test_custom_id_field(self):
"""Ensure that documents may be created with custom primary keys.
"""
class User(Document):
username = StringField(primary_key=True)
name = StringField()
meta = {'allow_inheritance': True}
User.drop_collection()
self.assertEqual(User._fields['username'].db_field, '_id')
self.assertEqual(User._meta['id_field'], 'username')
def create_invalid_user():
User(name='test').save() # no primary key field
self.assertRaises(ValidationError, create_invalid_user)
def define_invalid_user():
class EmailUser(User):
email = StringField(primary_key=True)
self.assertRaises(ValueError, define_invalid_user)
class EmailUser(User):
email = StringField()
user = User(username='test', name='test user')
user.save()
user_obj = User.objects.first()
self.assertEqual(user_obj.id, 'test')
self.assertEqual(user_obj.pk, 'test')
user_son = User.objects._collection.find_one()
self.assertEqual(user_son['_id'], 'test')
self.assertTrue('username' not in user_son['_id'])
User.drop_collection()
user = User(pk='mongo', name='mongo user')
user.save()
user_obj = User.objects.first()
self.assertEqual(user_obj.id, 'mongo')
self.assertEqual(user_obj.pk, 'mongo')
user_son = User.objects._collection.find_one()
self.assertEqual(user_son['_id'], 'mongo')
self.assertTrue('username' not in user_son['_id'])
User.drop_collection()
    def test_document_not_registered(self):
        """Querying a class whose subclass is unregistered must raise."""
        class Place(Document):
            name = StringField()
            meta = {'allow_inheritance': True}
        class NicePlace(Place):
            pass
        Place.drop_collection()
        Place(name="London").save()
        NicePlace(name="Buckingham Palace").save()
        # Mimic Place and NicePlace definitions being in a different file
        # and the NicePlace model not being imported in at query time.
        from mongoengine.base import _document_registry
        del(_document_registry['Place.NicePlace'])
        def query_without_importing_nice_place():
            print Place.objects.all()
        self.assertRaises(NotRegistered, query_without_importing_nice_place)
    def test_document_registry_regressions(self):
        """Subclasses are resolvable by both short and dotted names."""
        class Location(Document):
            name = StringField()
            meta = {'allow_inheritance': True}
        class Area(Location):
            # dbref=True + self-reference exercised the registry bug.
            location = ReferenceField('Location', dbref=True)
        Location.drop_collection()
        self.assertEqual(Area, get_document("Area"))
        self.assertEqual(Area, get_document("Location.Area"))
def test_creation(self):
"""Ensure that document may be created using keyword arguments.
"""
person = self.Person(name="Test User", age=30)
self.assertEqual(person.name, "Test User")
self.assertEqual(person.age, 30)
def test_to_dbref(self):
"""Ensure that you can get a dbref of a document"""
person = self.Person(name="Test User", age=30)
self.assertRaises(OperationError, person.to_dbref)
person.save()
person.to_dbref()
    def test_reload(self):
        """Ensure that attributes may be reloaded.
        """
        person = self.Person(name="Test User", age=20)
        person.save()
        person_obj = self.Person.objects.first()
        person_obj.name = "Mr Test User"
        person_obj.age = 21
        person_obj.save()
        # The stale instance still shows the old values...
        self.assertEqual(person.name, "Test User")
        self.assertEqual(person.age, 20)
        # ...a partial reload refreshes only the named field...
        person.reload('age')
        self.assertEqual(person.name, "Test User")
        self.assertEqual(person.age, 21)
        # ...and a full reload refreshes everything.
        person.reload()
        self.assertEqual(person.name, "Mr Test User")
        self.assertEqual(person.age, 21)
        person.reload()
        self.assertEqual(person.name, "Mr Test User")
        self.assertEqual(person.age, 21)
def test_reload_sharded(self):
class Animal(Document):
superphylum = StringField()
meta = {'shard_key': ('superphylum',)}
Animal.drop_collection()
doc = Animal(superphylum='Deuterostomia')
doc.save()
doc.reload()
Animal.drop_collection()
    def test_reload_sharded_nested(self):
        """reload() must work when the shard key is a nested (dotted) field."""
        class SuperPhylum(EmbeddedDocument):
            name = StringField()
        class Animal(Document):
            superphylum = EmbeddedDocumentField(SuperPhylum)
            meta = {'shard_key': ('superphylum.name',)}
        Animal.drop_collection()
        doc = Animal(superphylum=SuperPhylum(name='Deuterostomia'))
        doc.save()
        doc.reload()
        Animal.drop_collection()
    def test_reload_referencing(self):
        """Ensures reloading updates weakrefs correctly
        """
        class Embedded(EmbeddedDocument):
            dict_field = DictField()
            list_field = ListField()
        class Doc(Document):
            dict_field = DictField()
            list_field = ListField()
            embedded_field = EmbeddedDocumentField(Embedded)
        Doc.drop_collection()
        doc = Doc()
        doc.dict_field = {'hello': 'world'}
        doc.list_field = ['1', 2, {'hello': 'world'}]
        embedded_1 = Embedded()
        embedded_1.dict_field = {'hello': 'world'}
        embedded_1.list_field = ['1', 2, {'hello': 'world'}]
        doc.embedded_field = embedded_1
        doc.save()
        # reload(10) = reload with max_depth 10 (dereference nested docs).
        doc = doc.reload(10)
        doc.list_field.append(1)
        doc.dict_field['woot'] = "woot"
        doc.embedded_field.list_field.append(1)
        doc.embedded_field.dict_field['woot'] = "woot"
        # Mutations through containers must be tracked as changed fields.
        self.assertEqual(doc._get_changed_fields(), [
            'list_field', 'dict_field.woot', 'embedded_field.list_field',
            'embedded_field.dict_field.woot'])
        doc.save()
        self.assertEqual(len(doc.list_field), 4)
        # A full reload clears the changed-field bookkeeping.
        doc = doc.reload(10)
        self.assertEqual(doc._get_changed_fields(), [])
        self.assertEqual(len(doc.list_field), 4)
        self.assertEqual(len(doc.dict_field), 2)
        self.assertEqual(len(doc.embedded_field.list_field), 4)
        self.assertEqual(len(doc.embedded_field.dict_field), 2)
        doc.list_field.append(1)
        doc.save()
        doc.dict_field['extra'] = 1
        # A field-limited reload must also clear the changed-field state
        # without losing the locally modified (unreloaded) dict_field.
        doc = doc.reload(10, 'list_field')
        self.assertEqual(doc._get_changed_fields(), [])
        self.assertEqual(len(doc.list_field), 5)
        self.assertEqual(len(doc.dict_field), 3)
        self.assertEqual(len(doc.embedded_field.list_field), 4)
        self.assertEqual(len(doc.embedded_field.dict_field), 2)
def test_reload_doesnt_exist(self):
class Foo(Document):
pass
f = Foo()
try:
f.reload()
except Foo.DoesNotExist:
pass
except Exception:
self.assertFalse("Threw wrong exception")
f.save()
f.delete()
try:
f.reload()
except Foo.DoesNotExist:
pass
except Exception:
self.assertFalse("Threw wrong exception")
    def test_reload_of_non_strict_with_special_field_name(self):
        """Ensures reloading works for documents with meta strict == False
        """
        class Post(Document):
            meta = {
                'strict': False
            }
            title = StringField()
            # "items" shadows dict.items -- the special name under test.
            items = ListField()
        Post.drop_collection()
        # Insert via raw pymongo so the document bypasses mongoengine.
        Post._get_collection().insert_one({
            "title": "Items eclipse",
            "items": ["more lorem", "even more ipsum"]
        })
        post = Post.objects.first()
        post.reload()
        self.assertEqual(post.title, "Items eclipse")
        self.assertEqual(post.items, ["more lorem", "even more ipsum"])
    def test_dictionary_access(self):
        """Ensure that dictionary-style field access works properly.
        """
        person = self.Person(name='Test User', age=30, job=self.Job())
        self.assertEqual(person['name'], 'Test User')
        # Unknown keys must raise KeyError on both read and write.
        self.assertRaises(KeyError, person.__getitem__, 'salary')
        self.assertRaises(KeyError, person.__setitem__, 'salary', 50)
        person['name'] = 'Another User'
        self.assertEqual(person['name'], 'Another User')
        # Length = length(assigned fields + id)
        self.assertEqual(len(person), 5)
        self.assertTrue('age' in person)
        # Setting a field to None removes it from containment checks.
        person.age = None
        self.assertFalse('age' in person)
        self.assertFalse('nationality' in person)
    def test_embedded_document_to_mongo(self):
        """to_mongo() keeps _cls first and fields in declaration order."""
        class Person(EmbeddedDocument):
            name = StringField()
            age = IntField()
            meta = {"allow_inheritance": True}
        class Employee(Person):
            salary = IntField()
        self.assertEqual(Person(name="Bob", age=35).to_mongo().keys(),
                         ['_cls', 'name', 'age'])
        # Subclass fields are appended after the inherited ones.
        self.assertEqual(
            Employee(name="Bob", age=35, salary=0).to_mongo().keys(),
            ['_cls', 'name', 'age', 'salary'])
def test_embedded_document_to_mongo_id(self):
class SubDoc(EmbeddedDocument):
id = StringField(required=True)
sub_doc = SubDoc(id="abc")
self.assertEqual(sub_doc.to_mongo().keys(), ['id'])
def test_embedded_document(self):
"""Ensure that embedded documents are set up correctly.
"""
class Comment(EmbeddedDocument):
content = StringField()
self.assertTrue('content' in Comment._fields)
self.assertFalse('id' in Comment._fields)
    def test_embedded_document_instance(self):
        """Ensure that embedded documents can reference parent instance
        """
        class Embedded(EmbeddedDocument):
            string = StringField()
        class Doc(Document):
            embedded_field = EmbeddedDocumentField(Embedded)
        Doc.drop_collection()
        doc = Doc(embedded_field=Embedded(string="Hi"))
        # Back-reference must exist both before and after a db round-trip.
        self.assertHasInstance(doc.embedded_field, doc)
        doc.save()
        doc = Doc.objects.get()
        self.assertHasInstance(doc.embedded_field, doc)
    def test_embedded_document_complex_instance(self):
        """Ensure that embedded documents in complex fields can reference
        parent instance"""
        class Embedded(EmbeddedDocument):
            string = StringField()
        class Doc(Document):
            embedded_field = ListField(EmbeddedDocumentField(Embedded))
        Doc.drop_collection()
        doc = Doc(embedded_field=[Embedded(string="Hi")])
        # Back-reference must survive storage inside a ListField too.
        self.assertHasInstance(doc.embedded_field[0], doc)
        doc.save()
        doc = Doc.objects.get()
        self.assertHasInstance(doc.embedded_field[0], doc)
    def test_instance_is_set_on_setattr(self):
        """Assigning an embedded doc via attribute sets its parent ref."""
        class Email(EmbeddedDocument):
            email = EmailField()
        class Account(Document):
            email = EmbeddedDocumentField(Email)
        Account.drop_collection()
        acc = Account()
        acc.email = Email(email='test@example.com')
        self.assertHasInstance(acc._data["email"], acc)
        acc.save()
        acc1 = Account.objects.first()
        self.assertHasInstance(acc1._data["email"], acc1)
    def test_instance_is_set_on_setattr_on_embedded_document_list(self):
        """Assigning an embedded-doc list via attribute sets parent refs."""
        class Email(EmbeddedDocument):
            email = EmailField()
        class Account(Document):
            emails = EmbeddedDocumentListField(Email)
        Account.drop_collection()
        acc = Account()
        acc.emails = [Email(email='test@example.com')]
        self.assertHasInstance(acc._data["emails"][0], acc)
        acc.save()
        acc1 = Account.objects.first()
        self.assertHasInstance(acc1._data["emails"][0], acc1)
def test_document_clean(self):
class TestDocument(Document):
status = StringField()
pub_date = DateTimeField()
def clean(self):
if self.status == 'draft' and self.pub_date is not None:
msg = 'Draft entries may not have a publication date.'
raise ValidationError(msg)
# Set the pub_date for published items if not set.
if self.status == 'published' and self.pub_date is None:
self.pub_date = datetime.now()
TestDocument.drop_collection()
t = TestDocument(status="draft", pub_date=datetime.now())
try:
t.save()
except ValidationError, e:
expect_msg = "Draft entries may not have a publication date."
self.assertTrue(expect_msg in e.message)
self.assertEqual(e.to_dict(), {'__all__': expect_msg})
t = TestDocument(status="published")
t.save(clean=False)
self.assertEqual(t.pub_date, None)
t = TestDocument(status="published")
t.save(clean=True)
self.assertEqual(type(t.pub_date), datetime)
def test_document_embedded_clean(self):
class TestEmbeddedDocument(EmbeddedDocument):
x = IntField(required=True)
y = IntField(required=True)
z = IntField(required=True)
meta = {'allow_inheritance': False}
def clean(self):
if self.z:
if self.z != self.x + self.y:
raise ValidationError('Value of z != x + y')
else:
self.z = self.x + self.y
class TestDocument(Document):
doc = EmbeddedDocumentField(TestEmbeddedDocument)
status = StringField()
TestDocument.drop_collection()
t = TestDocument(doc=TestEmbeddedDocument(x=10, y=25, z=15))
try:
t.save()
except ValidationError, e:
expect_msg = "Value of z != x + y"
self.assertTrue(expect_msg in e.message)
self.assertEqual(e.to_dict(), {'doc': {'__all__': expect_msg}})
t = TestDocument(doc=TestEmbeddedDocument(x=10, y=25)).save()
self.assertEqual(t.doc.z, 35)
# Asserts not raises
t = TestDocument(doc=TestEmbeddedDocument(x=15, y=35, z=5))
t.save(clean=False)
    def test_modify_empty(self):
        """modify() on an unsaved document raises and leaves the db alone."""
        doc = self.Person(name="bob", age=10).save()
        self.assertRaises(
            InvalidDocumentError, lambda: self.Person().modify(set__age=10))
        self.assertDbEqual([dict(doc.to_mongo())])
    def test_modify_invalid_query(self):
        """modify() rejects a query that targets a different document id."""
        doc1 = self.Person(name="bob", age=10).save()
        doc2 = self.Person(name="jim", age=20).save()
        docs = [dict(doc1.to_mongo()), dict(doc2.to_mongo())]
        self.assertRaises(
            InvalidQueryError,
            lambda: doc1.modify(dict(id=doc2.id), set__value=20))
        self.assertDbEqual(docs)
    def test_modify_match_another_document(self):
        """modify() returns falsy and changes nothing when the extra
        query conditions match a different document."""
        doc1 = self.Person(name="bob", age=10).save()
        doc2 = self.Person(name="jim", age=20).save()
        docs = [dict(doc1.to_mongo()), dict(doc2.to_mongo())]
        assert not doc1.modify(dict(name=doc2.name), set__age=100)
        self.assertDbEqual(docs)
    def test_modify_not_exists(self):
        """modify() returns falsy when the target doc is not in the db."""
        doc1 = self.Person(name="bob", age=10).save()
        # doc2 has a pk but was never saved.
        doc2 = self.Person(id=ObjectId(), name="jim", age=20)
        docs = [dict(doc1.to_mongo())]
        assert not doc2.modify(dict(name=doc2.name), set__age=100)
        self.assertDbEqual(docs)
    def test_modify_update(self):
        """modify() applies the given updates atomically and refreshes the
        local instance, discarding unsaved local changes."""
        other_doc = self.Person(name="bob", age=10).save()
        doc = self.Person(
            name="jim", age=20, job=self.Job(name="10gen", years=3)).save()
        doc_copy = doc._from_son(doc.to_mongo())
        # these changes must go away
        doc.name = "liza"
        doc.job.name = "Google"
        doc.job.years = 3
        assert doc.modify(
            set__age=21, set__job__name="MongoDB", unset__job__years=True)
        # Apply the same updates to the copy to compare against.
        doc_copy.age = 21
        doc_copy.job.name = "MongoDB"
        del doc_copy.job.years
        assert doc.to_json() == doc_copy.to_json()
        # modify() must leave the instance with a clean change-state.
        assert doc._get_changed_fields() == []
        self.assertDbEqual([dict(other_doc.to_mongo()), dict(doc.to_mongo())])
    def test_save(self):
        """Ensure that a document may be saved in the database.
        """
        # Create person object and save it to the database
        person = self.Person(name='Test User', age=30)
        person.save()
        # Ensure that the object is in the database
        collection = self.db[self.Person._get_collection_name()]
        person_obj = collection.find_one({'name': 'Test User'})
        self.assertEqual(person_obj['name'], 'Test User')
        self.assertEqual(person_obj['age'], 30)
        self.assertEqual(person_obj['_id'], person.id)
        # Test skipping validation on save
        class Recipient(Document):
            email = EmailField(required=True)
        # 'root@localhost' fails EmailField validation...
        recipient = Recipient(email='root@localhost')
        self.assertRaises(ValidationError, recipient.save)
        # ...but validate=False must allow it through.
        try:
            recipient.save(validate=False)
        except ValidationError:
            self.fail()
    def test_save_to_a_value_that_equates_to_false(self):
        """Falsy values (0) must still be persisted, not treated as unset."""
        class Thing(EmbeddedDocument):
            count = IntField()
        class User(Document):
            thing = EmbeddedDocumentField(Thing)
        User.drop_collection()
        user = User(thing=Thing(count=1))
        user.save()
        user.reload()
        # 0 is falsy -- the regression was that it got dropped on save.
        user.thing.count = 0
        user.save()
        user.reload()
        self.assertEqual(user.thing.count, 0)
    def test_save_max_recursion_not_hit(self):
        """Saving documents with circular references must not recurse forever."""
        class Person(Document):
            name = StringField()
            parent = ReferenceField('self')
            friend = ReferenceField('self')
        Person.drop_collection()
        p1 = Person(name="Wilson Snr")
        p1.parent = None
        p1.save()
        p2 = Person(name="Wilson Jr")
        p2.parent = p1
        p2.save()
        # Create a reference cycle: p1 -> p2 -> p1.
        p1.friend = p2
        p1.save()
        # Confirm can save and it resets the changed fields without hitting
        # max recursion error
        p0 = Person.objects.first()
        p0.name = 'wpjunior'
        p0.save()
def test_save_max_recursion_not_hit_with_file_field(self):
class Foo(Document):
name = StringField()
picture = FileField()
bar = ReferenceField('self')
Foo.drop_collection()
a = Foo(name='hello').save()
a.bar = a
with open(TEST_IMAGE_PATH, 'rb') as test_image:
a.picture = test_image
a.save()
# Confirm can save and it resets the changed fields without hitting
# max recursion error
b = Foo.objects.with_id(a.id)
b.name = 'world'
b.save()
self.assertEqual(b.picture, b.bar.picture, b.bar.bar.picture)
    def test_save_cascades(self):
        """save(cascade=True) also persists modified referenced documents."""
        class Person(Document):
            name = StringField()
            parent = ReferenceField('self')
        Person.drop_collection()
        p1 = Person(name="Wilson Snr")
        p1.parent = None
        p1.save()
        p2 = Person(name="Wilson Jr")
        p2.parent = p1
        p2.save()
        p = Person.objects(name="Wilson Jr").get()
        # Mutate the dereferenced parent, then cascade-save the child.
        p.parent.name = "Daddy Wilson"
        p.save(cascade=True)
        p1.reload()
        self.assertEqual(p1.name, p.parent.name)
    def test_save_cascade_kwargs(self):
        """cascade_kwargs are forwarded to the cascaded save() calls."""
        class Person(Document):
            name = StringField()
            parent = ReferenceField('self')
        Person.drop_collection()
        p1 = Person(name="Wilson Snr")
        p1.parent = None
        p1.save()
        p2 = Person(name="Wilson Jr")
        p2.parent = p1
        p1.name = "Daddy Wilson"
        # force_insert applies to p2 only; the cascaded save of p1
        # (an update) must get force_insert=False via cascade_kwargs.
        p2.save(force_insert=True, cascade_kwargs={"force_insert": False})
        p1.reload()
        p2.reload()
        self.assertEqual(p1.name, p2.parent.name)
    def test_save_cascade_meta_false(self):
        """meta cascade=False disables cascading unless overridden per-call."""
        class Person(Document):
            name = StringField()
            parent = ReferenceField('self')
            meta = {'cascade': False}
        Person.drop_collection()
        p1 = Person(name="Wilson Snr")
        p1.parent = None
        p1.save()
        p2 = Person(name="Wilson Jr")
        p2.parent = p1
        p2.save()
        p = Person.objects(name="Wilson Jr").get()
        p.parent.name = "Daddy Wilson"
        # Plain save must NOT persist the referenced parent...
        p.save()
        p1.reload()
        self.assertNotEqual(p1.name, p.parent.name)
        # ...but an explicit cascade=True overrides the meta default.
        p.save(cascade=True)
        p1.reload()
        self.assertEqual(p1.name, p.parent.name)
    def test_save_cascade_meta_true(self):
        """A one-off save(cascade=True) does not make later saves cascade."""
        class Person(Document):
            name = StringField()
            parent = ReferenceField('self')
            meta = {'cascade': False}
        Person.drop_collection()
        p1 = Person(name="Wilson Snr")
        p1.parent = None
        p1.save()
        p2 = Person(name="Wilson Jr")
        p2.parent = p1
        p2.save(cascade=True)
        p = Person.objects(name="Wilson Jr").get()
        p.parent.name = "Daddy Wilson"
        # This save uses the meta default (cascade=False), so the
        # parent's change must not be persisted.
        p.save()
        p1.reload()
        self.assertNotEqual(p1.name, p.parent.name)
    def test_save_cascades_generically(self):
        """Cascading saves also work through GenericReferenceField."""
        class Person(Document):
            name = StringField()
            parent = GenericReferenceField()
        Person.drop_collection()
        p1 = Person(name="Wilson Snr")
        p1.save()
        p2 = Person(name="Wilson Jr")
        p2.parent = p1
        p2.save()
        p = Person.objects(name="Wilson Jr").get()
        p.parent.name = "Daddy Wilson"
        # Default save does not cascade through a generic reference...
        p.save()
        p1.reload()
        self.assertNotEqual(p1.name, p.parent.name)
        # ...explicit cascade=True does.
        p.save(cascade=True)
        p1.reload()
        self.assertEqual(p1.name, p.parent.name)
    def test_save_atomicity_condition(self):
        """save_condition gates the save on the db-side document state,
        raising SaveConditionError when the condition does not match."""
        class Widget(Document):
            toggle = BooleanField(default=False)
            count = IntField(default=0)
            save_id = UUIDField()
        # Helper: flip the toggle and bump the counter in memory only.
        def flip(widget):
            widget.toggle = not widget.toggle
            widget.count += 1
        # Helper: deterministic UUID from an int.
        def UUID(i):
            return uuid.UUID(int=i)
        Widget.drop_collection()
        w1 = Widget(toggle=False, save_id=UUID(1))
        # ignore save_condition on new record creation
        w1.save(save_condition={'save_id': UUID(42)})
        w1.reload()
        self.assertFalse(w1.toggle)
        self.assertEqual(w1.save_id, UUID(1))
        self.assertEqual(w1.count, 0)
        # mismatch in save_condition prevents save and raise exception
        flip(w1)
        self.assertTrue(w1.toggle)
        self.assertEqual(w1.count, 1)
        self.assertRaises(SaveConditionError,
                          w1.save, save_condition={'save_id': UUID(42)})
        w1.reload()
        self.assertFalse(w1.toggle)
        self.assertEqual(w1.count, 0)
        # matched save_condition allows save
        flip(w1)
        self.assertTrue(w1.toggle)
        self.assertEqual(w1.count, 1)
        w1.save(save_condition={'save_id': UUID(1)})
        w1.reload()
        self.assertTrue(w1.toggle)
        self.assertEqual(w1.count, 1)
        # save_condition can be used to ensure atomic read & updates
        # i.e., prevent interleaved reads and writes from separate contexts
        w2 = Widget.objects.get()
        self.assertEqual(w1, w2)
        old_id = w1.save_id
        flip(w1)
        w1.save_id = UUID(2)
        w1.save(save_condition={'save_id': old_id})
        w1.reload()
        self.assertFalse(w1.toggle)
        self.assertEqual(w1.count, 2)
        # w2 still carries the old save_id, so its save must be rejected.
        flip(w2)
        flip(w2)
        self.assertRaises(SaveConditionError,
                          w2.save, save_condition={'save_id': old_id})
        w2.reload()
        self.assertFalse(w2.toggle)
        self.assertEqual(w2.count, 2)
        # save_condition uses mongoengine-style operator syntax
        flip(w1)
        w1.save(save_condition={'count__lt': w1.count})
        w1.reload()
        self.assertTrue(w1.toggle)
        self.assertEqual(w1.count, 3)
        flip(w1)
        self.assertRaises(SaveConditionError,
                          w1.save, save_condition={'count__gte': w1.count})
        w1.reload()
        self.assertTrue(w1.toggle)
        self.assertEqual(w1.count, 3)
    def test_update(self):
        """Ensure that an existing document is updated instead of be
        overwritten."""
        # Create person object and save it to the database
        person = self.Person(name='Test User', age=30)
        person.save()
        # Create same person object, with same id, without age
        same_person = self.Person(name='Test')
        same_person.id = person.id
        same_person.save()
        # Confirm only one object
        self.assertEqual(self.Person.objects.count(), 1)
        # reload
        person.reload()
        same_person.reload()
        # Confirm the same
        self.assertEqual(person, same_person)
        self.assertEqual(person.name, same_person.name)
        self.assertEqual(person.age, same_person.age)
        # Confirm the saved values (age survived: update, not overwrite)
        self.assertEqual(person.name, 'Test')
        self.assertEqual(person.age, 30)
        # Test only / exclude only updates included fields
        person = self.Person.objects.only('name').get()
        person.name = 'User'
        person.save()
        person.reload()
        self.assertEqual(person.name, 'User')
        self.assertEqual(person.age, 30)
        # test exclude only updates set fields
        person = self.Person.objects.exclude('name').get()
        person.age = 21
        person.save()
        person.reload()
        self.assertEqual(person.name, 'User')
        self.assertEqual(person.age, 21)
        # Test only / exclude can set non excluded / included fields
        person = self.Person.objects.only('name').get()
        person.name = 'Test'
        person.age = 30
        person.save()
        person.reload()
        self.assertEqual(person.name, 'Test')
        self.assertEqual(person.age, 30)
        # test exclude only updates set fields
        person = self.Person.objects.exclude('name').get()
        person.name = 'User'
        person.age = 21
        person.save()
        person.reload()
        self.assertEqual(person.name, 'User')
        self.assertEqual(person.age, 21)
        # Confirm does remove unrequired fields
        person = self.Person.objects.exclude('name').get()
        person.age = None
        person.save()
        person.reload()
        self.assertEqual(person.name, 'User')
        self.assertEqual(person.age, None)
        # Setting every field to None unsets them all in the db.
        person = self.Person.objects.get()
        person.name = None
        person.age = None
        person.save()
        person.reload()
        self.assertEqual(person.name, None)
        self.assertEqual(person.age, None)
def test_inserts_if_you_set_the_pk(self):
p1 = self.Person(name='p1', id=bson.ObjectId()).save()
p2 = self.Person(name='p2')
p2.id = bson.ObjectId()
p2.save()
self.assertEqual(2, self.Person.objects.count())
    def test_can_save_if_not_included(self):
        """A partially-loaded document (only()) can be saved without
        clobbering the defaulted fields of every field type."""
        class EmbeddedDoc(EmbeddedDocument):
            pass
        class Simple(Document):
            pass
        # One field of every type, each with a default, to prove none of
        # them is lost when saving a projection-limited instance.
        class Doc(Document):
            string_field = StringField(default='1')
            int_field = IntField(default=1)
            float_field = FloatField(default=1.1)
            boolean_field = BooleanField(default=True)
            datetime_field = DateTimeField(default=datetime.now)
            embedded_document_field = EmbeddedDocumentField(
                EmbeddedDoc, default=lambda: EmbeddedDoc())
            list_field = ListField(default=lambda: [1, 2, 3])
            dict_field = DictField(default=lambda: {"hello": "world"})
            objectid_field = ObjectIdField(default=bson.ObjectId)
            reference_field = ReferenceField(Simple, default=lambda:
                                             Simple().save())
            map_field = MapField(IntField(), default=lambda: {"simple": 1})
            decimal_field = DecimalField(default=1.0)
            complex_datetime_field = ComplexDateTimeField(default=datetime.now)
            url_field = URLField(default="http://mongoengine.org")
            dynamic_field = DynamicField(default=1)
            generic_reference_field = GenericReferenceField(
                default=lambda: Simple().save())
            sorted_list_field = SortedListField(IntField(),
                                                default=lambda: [1, 2, 3])
            email_field = EmailField(default="ross@example.com")
            geo_point_field = GeoPointField(default=lambda: [1, 2])
            sequence_field = SequenceField()
            uuid_field = UUIDField(default=uuid.uuid4)
            generic_embedded_document_field = GenericEmbeddedDocumentField(
                default=lambda: EmbeddedDoc())
        Simple.drop_collection()
        Doc.drop_collection()
        Doc().save()
        # Load only one field, modify it, save -- other fields must survive.
        my_doc = Doc.objects.only("string_field").first()
        my_doc.string_field = "string"
        my_doc.save()
        my_doc = Doc.objects.get(string_field="string")
        self.assertEqual(my_doc.string_field, "string")
        self.assertEqual(my_doc.int_field, 1)
    def test_document_update(self):
        """Document.update() applies atomic updates to a saved document."""
        # update() on an unsaved document must raise.
        def update_not_saved_raises():
            person = self.Person(name='dcrosta')
            person.update(set__name='Dan Crosta')
        self.assertRaises(OperationError, update_not_saved_raises)
        author = self.Person(name='dcrosta')
        author.save()
        author.update(set__name='Dan Crosta')
        author.reload()
        p1 = self.Person.objects.first()
        self.assertEqual(p1.name, author.name)
        # update() with no modifiers must raise.
        def update_no_value_raises():
            person = self.Person.objects.first()
            person.update()
        self.assertRaises(OperationError, update_no_value_raises)
        # A bare kwarg (no operator prefix) defaults to $set.
        def update_no_op_should_default_to_set():
            person = self.Person.objects.first()
            person.update(name="Dan")
            person.reload()
            return person.name
        self.assertEqual("Dan", update_no_op_should_default_to_set())
    def test_update_unique_field(self):
        """update() violating a unique index raises NotUniqueError."""
        class Doc(Document):
            name = StringField(unique=True)
        doc1 = Doc(name="first").save()
        doc2 = Doc(name="second").save()
        self.assertRaises(NotUniqueError, lambda:
                          doc2.update(set__name=doc1.name))
    def test_embedded_update(self):
        """
        Test update on `EmbeddedDocumentField` fields
        """
        class Page(EmbeddedDocument):
            log_message = StringField(verbose_name="Log message",
                                      required=True)
        class Site(Document):
            page = EmbeddedDocumentField(Page)
        Site.drop_collection()
        site = Site(page=Page(log_message="Warning: Dummy message"))
        site.save()
        # Update the nested field through attribute access and save.
        site = Site.objects.first()
        site.page.log_message = "Error: Dummy message"
        site.save()
        site = Site.objects.first()
        self.assertEqual(site.page.log_message, "Error: Dummy message")
    def test_embedded_update_db_field(self):
        """
        Test update on `EmbeddedDocumentField` fields when db_field is other
        than default.
        """
        class Page(EmbeddedDocument):
            # Stored under "page_log_message"; the update path must use
            # the db_field name, not the attribute name.
            log_message = StringField(verbose_name="Log message",
                                      db_field="page_log_message",
                                      required=True)
        class Site(Document):
            page = EmbeddedDocumentField(Page)
        Site.drop_collection()
        site = Site(page=Page(log_message="Warning: Dummy message"))
        site.save()
        # Update
        site = Site.objects.first()
        site.page.log_message = "Error: Dummy message"
        site.save()
        site = Site.objects.first()
        self.assertEqual(site.page.log_message, "Error: Dummy message")
    def test_save_only_changed_fields(self):
        """Ensure save only sets / unsets changed fields, so two instances
        of the same document can each persist their own modification without
        clobbering the other's.
        """
        class User(self.Person):
            active = BooleanField(default=True)
        User.drop_collection()
        # Create person object and save it to the database
        user = User(name='Test User', age=30, active=True)
        user.save()
        user.reload()
        # Simulated race condition: a second in-memory copy of the same
        # document is modified concurrently.
        same_person = self.Person.objects.get()
        same_person.active = False
        user.age = 21
        user.save()
        same_person.name = 'User'
        same_person.save()
        # Both partial saves must survive: name/active from same_person,
        # age from user.
        person = self.Person.objects.get()
        self.assertEqual(person.name, 'User')
        self.assertEqual(person.age, 21)
        self.assertEqual(person.active, False)
    def test_query_count_when_saving(self):
        """Ensure references don't cause extra fetches when saving.

        Each `with query_counter()` section asserts the exact number of
        queries issued at every step, so statement order inside those
        sections is significant.
        """
        class Organization(Document):
            name = StringField()
        class User(Document):
            name = StringField()
            orgs = ListField(ReferenceField('Organization'))
        class Feed(Document):
            name = StringField()
        class UserSubscription(Document):
            name = StringField()
            user = ReferenceField(User)
            feed = ReferenceField(Feed)
        Organization.drop_collection()
        User.drop_collection()
        Feed.drop_collection()
        UserSubscription.drop_collection()
        o1 = Organization(name="o1").save()
        o2 = Organization(name="o2").save()
        u1 = User(name="Ross", orgs=[o1, o2]).save()
        f1 = Feed(name="MongoEngine").save()
        sub = UserSubscription(user=u1, feed=f1).save()
        user = User.objects.first()
        # Even if stored as ObjectId's internally mongoengine uses DBRefs
        # As ObjectId's aren't automatically derefenced
        self.assertTrue(isinstance(user._data['orgs'][0], DBRef))
        # Attribute access dereferences (and caches the result in _data).
        self.assertTrue(isinstance(user.orgs[0], Organization))
        self.assertTrue(isinstance(user._data['orgs'][0], Organization))
        # Changing a value: 1 query to fetch, 1 to save.
        with query_counter() as q:
            self.assertEqual(q, 0)
            sub = UserSubscription.objects.first()
            self.assertEqual(q, 1)
            sub.name = "Test Sub"
            sub.save()
            self.assertEqual(q, 2)
        # Changing a value that will cascade: touching sub.user costs a
        # dereference query; cascade save adds one more.
        with query_counter() as q:
            self.assertEqual(q, 0)
            sub = UserSubscription.objects.first()
            self.assertEqual(q, 1)
            sub.user.name = "Test"
            self.assertEqual(q, 2)
            sub.save(cascade=True)
            self.assertEqual(q, 3)
        # Changing a value and one that will cascade
        with query_counter() as q:
            self.assertEqual(q, 0)
            sub = UserSubscription.objects.first()
            sub.name = "Test Sub 2"
            self.assertEqual(q, 1)
            sub.user.name = "Test 2"
            self.assertEqual(q, 2)
            sub.save(cascade=True)
            self.assertEqual(q, 4)  # One for the UserSub and one for the User
        # Saving with just the refs: constructing from pks must not fetch.
        with query_counter() as q:
            self.assertEqual(q, 0)
            sub = UserSubscription(user=u1.pk, feed=f1.pk)
            self.assertEqual(q, 0)
            sub.save()
            self.assertEqual(q, 1)
        # Saving with just the refs on a ListField
        with query_counter() as q:
            self.assertEqual(q, 0)
            User(name="Bob", orgs=[o1.pk, o2.pk]).save()
            self.assertEqual(q, 1)
        # Saving new objects built from already-fetched references must not
        # re-fetch them.
        with query_counter() as q:
            self.assertEqual(q, 0)
            user = User.objects.first()
            self.assertEqual(q, 1)
            feed = Feed.objects.first()
            self.assertEqual(q, 2)
            sub = UserSubscription(user=user, feed=feed)
            self.assertEqual(q, 2)  # Check no change
            sub.save()
            self.assertEqual(q, 3)
    def test_set_unset_one_operation(self):
        """Ensure that $set and $unset actions are performed in the same
        operation (a single query), not one query each.
        """
        class FooBar(Document):
            foo = StringField(default=None)
            bar = StringField(default=None)
        FooBar.drop_collection()
        # write an entity with a single prop
        foo = FooBar(foo='foo').save()
        self.assertEqual(foo.foo, 'foo')
        # Deleting one field ($unset) and setting another ($set) ...
        del foo.foo
        foo.bar = 'bar'
        # ... must be flushed by save() as exactly one update query.
        with query_counter() as q:
            self.assertEqual(0, q)
            foo.save()
            self.assertEqual(1, q)
    def test_save_only_changed_fields_recursive(self):
        """Ensure save only sets / unsets changed fields, including changes
        nested inside embedded documents held in lists and dicts.
        """
        class Comment(EmbeddedDocument):
            published = BooleanField(default=True)
        class User(self.Person):
            comments_dict = DictField()
            comments = ListField(EmbeddedDocumentField(Comment))
            active = BooleanField(default=True)
        User.drop_collection()
        # Create person object and save it to the database
        person = User(name='Test User', age=30, active=True)
        person.comments.append(Comment())
        person.save()
        person.reload()
        # Change tracking inside a ListField of embedded documents.
        person = self.Person.objects.get()
        self.assertTrue(person.comments[0].published)
        person.comments[0].published = False
        person.save()
        person = self.Person.objects.get()
        self.assertFalse(person.comments[0].published)
        # Change tracking inside a DictField holding an embedded document.
        person.comments_dict['first_post'] = Comment()
        person.save()
        person = self.Person.objects.get()
        self.assertTrue(person.comments_dict['first_post'].published)
        person.comments_dict['first_post'].published = False
        person.save()
        person = self.Person.objects.get()
        self.assertFalse(person.comments_dict['first_post'].published)
    def test_delete(self):
        """Ensure that document may be deleted using the delete method.
        """
        person = self.Person(name="Test User", age=30)
        person.save()
        self.assertEqual(self.Person.objects.count(), 1)
        # delete() removes the backing document from the collection.
        person.delete()
        self.assertEqual(self.Person.objects.count(), 0)
    def test_save_custom_id(self):
        """Ensure that a document may be saved with a custom _id.
        """
        # Create person object and save it to the database
        person = self.Person(name='Test User', age=30,
                             id='497ce96f395f2f052a494fd4')
        person.save()
        # Ensure that the object is in the database with the correct _id,
        # checked via raw pymongo to bypass the document layer.
        collection = self.db[self.Person._get_collection_name()]
        person_obj = collection.find_one({'name': 'Test User'})
        self.assertEqual(str(person_obj['_id']), '497ce96f395f2f052a494fd4')
    def test_save_custom_pk(self):
        """
        Ensure that a document may be saved with a custom _id using pk alias.
        """
        # Create person object and save it to the database, setting the id
        # through the `pk` alias rather than `id`.
        person = self.Person(name='Test User', age=30,
                             pk='497ce96f395f2f052a494fd4')
        person.save()
        # Ensure that the object is in the database with the correct _id
        collection = self.db[self.Person._get_collection_name()]
        person_obj = collection.find_one({'name': 'Test User'})
        self.assertEqual(str(person_obj['_id']), '497ce96f395f2f052a494fd4')
    def test_save_list(self):
        """Ensure that a list field may be properly saved, for both plain
        values (tags) and embedded documents (comments).
        """
        class Comment(EmbeddedDocument):
            content = StringField()
        class BlogPost(Document):
            content = StringField()
            comments = ListField(EmbeddedDocumentField(Comment))
            tags = ListField(StringField())
        BlogPost.drop_collection()
        post = BlogPost(content='Went for a walk today...')
        post.tags = tags = ['fun', 'leisure']
        comments = [Comment(content='Good for you'), Comment(content='Yay.')]
        post.comments = comments
        post.save()
        # Verify the raw stored document via pymongo.
        collection = self.db[BlogPost._get_collection_name()]
        post_obj = collection.find_one()
        self.assertEqual(post_obj['tags'], tags)
        for comment_obj, comment in zip(post_obj['comments'], comments):
            self.assertEqual(comment_obj['content'], comment['content'])
        BlogPost.drop_collection()
    def test_list_search_by_embedded(self):
        """Ensure documents can be filtered by a reference held inside an
        embedded document within a ListField (comments__user lookup)."""
        class User(Document):
            username = StringField(required=True)
            meta = {'allow_inheritance': False}
        class Comment(EmbeddedDocument):
            comment = StringField()
            user = ReferenceField(User,
                                  required=True)
            meta = {'allow_inheritance': False}
        class Page(Document):
            comments = ListField(EmbeddedDocumentField(Comment))
            # Index on the embedded reference used by the queries below.
            meta = {'allow_inheritance': False,
                    'indexes': [
                        {'fields': ['comments.user']}
                    ]}
        User.drop_collection()
        Page.drop_collection()
        u1 = User(username="wilson")
        u1.save()
        u2 = User(username="rozza")
        u2.save()
        u3 = User(username="hmarr")
        u3.save()
        p1 = Page(comments=[Comment(user=u1, comment="Its very good"),
                            Comment(user=u2, comment="Hello world"),
                            Comment(user=u3, comment="Ping Pong"),
                            Comment(user=u1, comment="I like a beer")])
        p1.save()
        p2 = Page(comments=[Comment(user=u1, comment="Its very good"),
                            Comment(user=u2, comment="Hello world")])
        p2.save()
        p3 = Page(comments=[Comment(user=u3, comment="Its very good")])
        p3.save()
        p4 = Page(comments=[Comment(user=u2, comment="Heavy Metal song")])
        p4.save()
        # Each user must match exactly the pages that contain a comment
        # authored by them.
        self.assertEqual(
            [p1, p2],
            list(Page.objects.filter(comments__user=u1)))
        self.assertEqual(
            [p1, p2, p4],
            list(Page.objects.filter(comments__user=u2)))
        self.assertEqual(
            [p1, p3],
            list(Page.objects.filter(comments__user=u3)))
    def test_save_embedded_document(self):
        """Ensure that a document with an embedded document field may be
        saved in the database.
        """
        class EmployeeDetails(EmbeddedDocument):
            position = StringField()
        class Employee(self.Person):
            salary = IntField()
            details = EmbeddedDocumentField(EmployeeDetails)
        # Create employee object and save it to the database
        employee = Employee(name='Test Employee', age=50, salary=20000)
        employee.details = EmployeeDetails(position='Developer')
        employee.save()
        # Ensure that the object is in the database (raw pymongo check;
        # Employee inherits Person's collection).
        collection = self.db[self.Person._get_collection_name()]
        employee_obj = collection.find_one({'name': 'Test Employee'})
        self.assertEqual(employee_obj['name'], 'Test Employee')
        self.assertEqual(employee_obj['age'], 50)
        # Ensure that the 'details' embedded object saved correctly
        self.assertEqual(employee_obj['details']['position'], 'Developer')
    def test_embedded_update_after_save(self):
        """
        Test update of `EmbeddedDocumentField` attached to a newly saved
        document (no reload between the initial save and the update).
        """
        class Page(EmbeddedDocument):
            log_message = StringField(verbose_name="Log message",
                                      required=True)
        class Site(Document):
            page = EmbeddedDocumentField(Page)
        Site.drop_collection()
        site = Site(page=Page(log_message="Warning: Dummy message"))
        site.save()
        # Update the same in-memory instance and save again.
        site.page.log_message = "Error: Dummy message"
        site.save()
        site = Site.objects.first()
        self.assertEqual(site.page.log_message, "Error: Dummy message")
    def test_updating_an_embedded_document(self):
        """Ensure an embedded document field can be updated on a fetched
        document, and removed by assigning None.
        """
        class EmployeeDetails(EmbeddedDocument):
            position = StringField()
        class Employee(self.Person):
            salary = IntField()
            details = EmbeddedDocumentField(EmployeeDetails)
        # Create employee object and save it to the database
        employee = Employee(name='Test Employee', age=50, salary=20000)
        employee.details = EmployeeDetails(position='Developer')
        employee.save()
        # Test updating an embedded document
        promoted_employee = Employee.objects.get(name='Test Employee')
        promoted_employee.details.position = 'Senior Developer'
        promoted_employee.save()
        promoted_employee.reload()
        self.assertEqual(promoted_employee.name, 'Test Employee')
        self.assertEqual(promoted_employee.age, 50)
        # Ensure that the 'details' embedded object saved correctly
        self.assertEqual(
            promoted_employee.details.position, 'Senior Developer')
        # Test removal: setting the embedded field to None should unset it.
        promoted_employee.details = None
        promoted_employee.save()
        promoted_employee.reload()
        self.assertEqual(promoted_employee.details, None)
    def test_object_mixins(self):
        """Ensure fields declared on a plain-object mixin are picked up by
        both EmbeddedDocument and Document subclasses."""
        class NameMixin(object):
            name = StringField()
        class Foo(EmbeddedDocument, NameMixin):
            quantity = IntField()
        self.assertEqual(['name', 'quantity'], sorted(Foo._fields.keys()))
        class Bar(Document, NameMixin):
            widgets = StringField()
        # Documents additionally get the automatic 'id' field.
        self.assertEqual(['id', 'name', 'widgets'], sorted(Bar._fields.keys()))
    def test_mixin_inheritance(self):
        """Ensure fields from a chain of plain-object mixins (mixin
        inheriting from mixin) are all usable on a Document."""
        class BaseMixIn(object):
            count = IntField()
            data = StringField()
        class DoubleMixIn(BaseMixIn):
            comment = StringField()
        class TestDoc(Document, DoubleMixIn):
            age = IntField()
        TestDoc.drop_collection()
        t = TestDoc(count=12, data="test",
                    comment="great!", age=19)
        t.save()
        # All fields — own, mixin, and grand-mixin — round-trip through
        # the database.
        t = TestDoc.objects.first()
        self.assertEqual(t.age, 19)
        self.assertEqual(t.comment, "great!")
        self.assertEqual(t.data, "test")
        self.assertEqual(t.count, 12)
    def test_save_reference(self):
        """Ensure that a document reference field may be saved in the database.
        """
        class BlogPost(Document):
            meta = {'collection': 'blogpost_1'}
            content = StringField()
            author = ReferenceField(self.Person)
        BlogPost.drop_collection()
        author = self.Person(name='Test User')
        author.save()
        post = BlogPost(content='Watched some TV today... how exciting.')
        # Should only reference author when saving
        post.author = author
        post.save()
        post_obj = BlogPost.objects.first()
        # Test laziness: the raw value stays a DBRef until attribute access
        # dereferences it.
        self.assertTrue(isinstance(post_obj._data['author'],
                                   bson.DBRef))
        self.assertTrue(isinstance(post_obj.author, self.Person))
        self.assertEqual(post_obj.author.name, 'Test User')
        # Ensure that the dereferenced object may be changed and saved
        post_obj.author.age = 25
        post_obj.author.save()
        author = list(self.Person.objects(name='Test User'))[-1]
        self.assertEqual(author.age, 25)
        BlogPost.drop_collection()
    def test_duplicate_db_fields_raise_invalid_document_error(self):
        """Ensure an InvalidDocumentError is thrown if duplicate fields
        declare the same db_field"""
        def throw_invalid_document_error():
            class Foo(Document):
                name = StringField()
                # 'name2' maps to the same stored field as 'name' — invalid.
                name2 = StringField(db_field='name')
        self.assertRaises(InvalidDocumentError, throw_invalid_document_error)
    def test_invalid_son(self):
        """Raise an error if loading invalid data (SON whose value types
        don't match the schema) via _from_son."""
        class Occurrence(EmbeddedDocument):
            number = IntField()
        class Word(Document):
            stem = StringField()
            count = IntField(default=1)
            forms = ListField(StringField(), default=list)
            occurs = ListField(EmbeddedDocumentField(Occurrence), default=list)
        def raise_invalid_document():
            # Every field here carries the wrong type for its schema.
            Word._from_son({'stem': [1, 2, 3], 'forms': 1, 'count': 'one',
                            'occurs': {"hello": None}})
        self.assertRaises(InvalidDocumentError, raise_invalid_document)
    def test_reverse_delete_rule_cascade_and_nullify(self):
        """Ensure that a referenced document is also deleted upon deletion
        (CASCADE), while NULLIFY merely clears the reference.
        """
        class BlogPost(Document):
            content = StringField()
            author = ReferenceField(self.Person, reverse_delete_rule=CASCADE)
            reviewer = ReferenceField(self.Person, reverse_delete_rule=NULLIFY)
        self.Person.drop_collection()
        BlogPost.drop_collection()
        author = self.Person(name='Test User')
        author.save()
        reviewer = self.Person(name='Re Viewer')
        reviewer.save()
        post = BlogPost(content='Watched some TV')
        post.author = author
        post.reviewer = reviewer
        post.save()
        # NULLIFY: deleting the reviewer clears the reference only.
        reviewer.delete()
        # No effect on the BlogPost
        self.assertEqual(BlogPost.objects.count(), 1)
        self.assertEqual(BlogPost.objects.get().reviewer, None)
        # Delete the Person, which should lead to deletion of the BlogPost, too
        author.delete()
        self.assertEqual(BlogPost.objects.count(), 0)
    def test_reverse_delete_rule_with_document_inheritance(self):
        """Ensure that a referenced document is also deleted upon deletion
        of a child document (rules declared against the parent class apply
        to subclass instances too).
        """
        class Writer(self.Person):
            pass
        class BlogPost(Document):
            content = StringField()
            author = ReferenceField(self.Person, reverse_delete_rule=CASCADE)
            reviewer = ReferenceField(self.Person, reverse_delete_rule=NULLIFY)
        self.Person.drop_collection()
        BlogPost.drop_collection()
        # References point at Writer instances, not plain Persons.
        author = Writer(name='Test User')
        author.save()
        reviewer = Writer(name='Re Viewer')
        reviewer.save()
        post = BlogPost(content='Watched some TV')
        post.author = author
        post.reviewer = reviewer
        post.save()
        reviewer.delete()
        self.assertEqual(BlogPost.objects.count(), 1)
        self.assertEqual(BlogPost.objects.get().reviewer, None)
        # Delete the Writer should lead to deletion of the BlogPost
        author.delete()
        self.assertEqual(BlogPost.objects.count(), 0)
    def test_reverse_delete_rule_cascade_and_nullify_complex_field(self):
        """Ensure that a referenced document is also deleted upon deletion for
        complex fields (references held inside a ListField).
        """
        class BlogPost(Document):
            content = StringField()
            authors = ListField(ReferenceField(
                self.Person, reverse_delete_rule=CASCADE))
            reviewers = ListField(ReferenceField(
                self.Person, reverse_delete_rule=NULLIFY))
        self.Person.drop_collection()
        BlogPost.drop_collection()
        author = self.Person(name='Test User')
        author.save()
        reviewer = self.Person(name='Re Viewer')
        reviewer.save()
        post = BlogPost(content='Watched some TV')
        post.authors = [author]
        post.reviewers = [reviewer]
        post.save()
        # Deleting the reviewer should have no effect on the BlogPost;
        # NULLIFY on a list removes the entry, leaving an empty list.
        reviewer.delete()
        self.assertEqual(BlogPost.objects.count(), 1)
        self.assertEqual(BlogPost.objects.get().reviewers, [])
        # Delete the Person, which should lead to deletion of the BlogPost, too
        author.delete()
        self.assertEqual(BlogPost.objects.count(), 0)
    def test_reverse_delete_rule_cascade_triggers_pre_delete_signal(self):
        """ ensure the pre_delete signal is triggered upon a cascading deletion
        setup a blog post with content, an author and editor
        delete the author which triggers deletion of blogpost via cascade
        blog post's pre_delete signal alters an editor attribute
        """
        class Editor(self.Person):
            review_queue = IntField(default=0)
        class BlogPost(Document):
            content = StringField()
            author = ReferenceField(self.Person, reverse_delete_rule=CASCADE)
            editor = ReferenceField(Editor)
            @classmethod
            def pre_delete(cls, sender, document, **kwargs):
                # decrement the docs-to-review count
                document.editor.update(dec__review_queue=1)
        signals.pre_delete.connect(BlogPost.pre_delete, sender=BlogPost)
        self.Person.drop_collection()
        BlogPost.drop_collection()
        Editor.drop_collection()
        author = self.Person(name='Will S.').save()
        editor = Editor(name='Max P.', review_queue=1).save()
        BlogPost(content='wrote some books', author=author,
                 editor=editor).save()
        # delete the author, the post is also deleted due to the CASCADE rule
        author.delete()
        # the pre-delete signal should have decremented the editor's queue
        editor = Editor.objects(name='Max P.').get()
        self.assertEqual(editor.review_queue, 0)
    def test_two_way_reverse_delete_rule(self):
        """Ensure that Bi-Directional relationships work with
        reverse_delete_rule. Because Foo references Bar before Bar exists,
        the rules are registered explicitly after both classes are defined.
        """
        class Bar(Document):
            content = StringField()
            foo = ReferenceField('Foo')
        class Foo(Document):
            content = StringField()
            bar = ReferenceField(Bar)
        # Manual registration replaces the reverse_delete_rule kwarg, which
        # can't be used with a forward ('Foo') reference.
        Bar.register_delete_rule(Foo, 'bar', NULLIFY)
        Foo.register_delete_rule(Bar, 'foo', NULLIFY)
        Bar.drop_collection()
        Foo.drop_collection()
        b = Bar(content="Hello")
        b.save()
        f = Foo(content="world", bar=b)
        f.save()
        b.foo = f
        b.save()
        f.delete()
        # NULLIFY: Bar survives the delete, its reference is cleared.
        self.assertEqual(Bar.objects.count(), 1)
        self.assertEqual(Bar.objects.get().foo, None)
    def test_invalid_reverse_delete_rule_raise_errors(self):
        """Ensure reverse_delete_rule is rejected where unsupported:
        on references inside MapField/DictField, and inside embedded
        documents."""
        def throw_invalid_document_error():
            class Blog(Document):
                content = StringField()
                authors = MapField(ReferenceField(
                    self.Person, reverse_delete_rule=CASCADE))
                reviewers = DictField(
                    field=ReferenceField(
                        self.Person,
                        reverse_delete_rule=NULLIFY))
        self.assertRaises(InvalidDocumentError, throw_invalid_document_error)
        def throw_invalid_document_error_embedded():
            class Parents(EmbeddedDocument):
                father = ReferenceField('Person', reverse_delete_rule=DENY)
                mother = ReferenceField('Person', reverse_delete_rule=DENY)
        self.assertRaises(
            InvalidDocumentError, throw_invalid_document_error_embedded)
    def test_reverse_delete_rule_cascade_recurs(self):
        """Ensure that a chain of documents is also deleted upon cascaded
        deletion (Person -> BlogPost -> Comment).
        """
        class BlogPost(Document):
            content = StringField()
            author = ReferenceField(self.Person, reverse_delete_rule=CASCADE)
        class Comment(Document):
            text = StringField()
            post = ReferenceField(BlogPost, reverse_delete_rule=CASCADE)
        self.Person.drop_collection()
        BlogPost.drop_collection()
        Comment.drop_collection()
        author = self.Person(name='Test User')
        author.save()
        post = BlogPost(content='Watched some TV')
        post.author = author
        post.save()
        comment = Comment(text='Kudos.')
        comment.post = post
        comment.save()
        # Delete the Person, which should lead to deletion of the BlogPost,
        # and, recursively to the Comment, too
        author.delete()
        self.assertEqual(Comment.objects.count(), 0)
        self.Person.drop_collection()
        BlogPost.drop_collection()
        Comment.drop_collection()
    def test_reverse_delete_rule_deny(self):
        """Ensure that a document cannot be deleted while there are still
        documents referring to it (DENY rule).
        """
        class BlogPost(Document):
            content = StringField()
            author = ReferenceField(self.Person, reverse_delete_rule=DENY)
        self.Person.drop_collection()
        BlogPost.drop_collection()
        author = self.Person(name='Test User')
        author.save()
        post = BlogPost(content='Watched some TV')
        post.author = author
        post.save()
        # Delete the Person should be denied
        self.assertRaises(OperationError, author.delete)  # Should raise denied error
        self.assertEqual(BlogPost.objects.count(), 1)  # No objects may have been deleted
        self.assertEqual(self.Person.objects.count(), 1)
        # Other users, that don't have BlogPosts must be removable, like normal
        author = self.Person(name='Another User')
        author.save()
        self.assertEqual(self.Person.objects.count(), 2)
        author.delete()
        self.assertEqual(self.Person.objects.count(), 1)
        self.Person.drop_collection()
        BlogPost.drop_collection()
def subclasses_and_unique_keys_works(self):
class A(Document):
pass
class B(A):
foo = BooleanField(unique=True)
A.drop_collection()
B.drop_collection()
A().save()
A().save()
B(foo=True).save()
self.assertEqual(A.objects.count(), 2)
self.assertEqual(B.objects.count(), 1)
A.drop_collection()
B.drop_collection()
    def test_document_hash(self):
        """Test document hashing/equality in list, dict, set containers.
        """
        class User(Document):
            pass
        class BlogPost(Document):
            pass
        # Clear old data
        User.drop_collection()
        BlogPost.drop_collection()
        u1 = User.objects.create()
        u2 = User.objects.create()
        u3 = User.objects.create()
        u4 = User()  # New object, unsaved — no pk yet
        b1 = BlogPost.objects.create()
        b2 = BlogPost.objects.create()
        # in List: membership uses __eq__
        all_user_list = list(User.objects.all())
        self.assertTrue(u1 in all_user_list)
        self.assertTrue(u2 in all_user_list)
        self.assertTrue(u3 in all_user_list)
        self.assertFalse(u4 in all_user_list)  # New object
        self.assertFalse(b1 in all_user_list)  # Other object
        self.assertFalse(b2 in all_user_list)  # Other object
        # in Dict: keys require a consistent __hash__
        all_user_dic = {}
        for u in User.objects.all():
            all_user_dic[u] = "OK"
        self.assertEqual(all_user_dic.get(u1, False), "OK")
        self.assertEqual(all_user_dic.get(u2, False), "OK")
        self.assertEqual(all_user_dic.get(u3, False), "OK")
        self.assertEqual(all_user_dic.get(u4, False), False)  # New object
        self.assertEqual(all_user_dic.get(b1, False), False)  # Other object
        self.assertEqual(all_user_dic.get(b2, False), False)  # Other object
        # in Set
        all_user_set = set(User.objects.all())
        self.assertTrue(u1 in all_user_set)
    def test_picklable(self):
        """Ensure documents can be pickled before and after saving, and
        that unpickled copies remain fully usable (mutable, savable)."""
        pickle_doc = PickleTest(number=1, string="One", lists=['1', '2'])
        pickle_doc.embedded = PickleEmbedded()
        pickled_doc = pickle.dumps(pickle_doc)  # make sure pickling works even before the doc is saved
        pickle_doc.save()
        pickled_doc = pickle.dumps(pickle_doc)
        resurrected = pickle.loads(pickled_doc)
        self.assertEqual(resurrected, pickle_doc)
        # Test pickling changed data
        pickle_doc.lists.append("3")
        pickled_doc = pickle.dumps(pickle_doc)
        resurrected = pickle.loads(pickled_doc)
        self.assertEqual(resurrected, pickle_doc)
        # The unpickled copy must be modifiable and savable like any doc.
        resurrected.string = "Two"
        resurrected.save()
        pickle_doc = PickleTest.objects.first()
        self.assertEqual(resurrected, pickle_doc)
        self.assertEqual(pickle_doc.string, "Two")
        self.assertEqual(pickle_doc.lists, ["1", "2", "3"])
    def test_regular_document_pickle(self):
        """Ensure unpickling resolves the document class at load time, so
        a changed class definition is picked up by pickle.loads."""
        pickle_doc = PickleTest(number=1, string="One", lists=['1', '2'])
        pickled_doc = pickle.dumps(pickle_doc)  # make sure pickling works even before the doc is saved
        pickle_doc.save()
        pickled_doc = pickle.dumps(pickle_doc)
        # Test that when a document's definition changes the new
        # definition is used
        fixtures.PickleTest = fixtures.NewDocumentPickleTest
        resurrected = pickle.loads(pickled_doc)
        self.assertEqual(resurrected.__class__,
                         fixtures.NewDocumentPickleTest)
        self.assertEqual(resurrected._fields_ordered,
                         fixtures.NewDocumentPickleTest._fields_ordered)
        self.assertNotEqual(resurrected._fields_ordered,
                            pickle_doc._fields_ordered)
        # The local PickleTest is still a ref to the original; restore the
        # fixtures module so other tests see the original class.
        fixtures.PickleTest = PickleTest
    def test_dynamic_document_pickle(self):
        """Ensure dynamic documents (and their dynamic embedded documents)
        survive a pickle round-trip, including dynamic fields."""
        pickle_doc = PickleDynamicTest(
            name="test", number=1, string="One", lists=['1', '2'])
        pickle_doc.embedded = PickleDyanmicEmbedded(foo="Bar")
        pickled_doc = pickle.dumps(pickle_doc)  # make sure pickling works even before the doc is saved
        pickle_doc.save()
        pickled_doc = pickle.dumps(pickle_doc)
        resurrected = pickle.loads(pickled_doc)
        self.assertEqual(resurrected, pickle_doc)
        # Declared and dynamic fields must both survive, on the document
        # and on the embedded document alike.
        self.assertEqual(resurrected._fields_ordered,
                         pickle_doc._fields_ordered)
        self.assertEqual(resurrected._dynamic_fields.keys(),
                         pickle_doc._dynamic_fields.keys())
        self.assertEqual(resurrected.embedded, pickle_doc.embedded)
        self.assertEqual(resurrected.embedded._fields_ordered,
                         pickle_doc.embedded._fields_ordered)
        self.assertEqual(resurrected.embedded._dynamic_fields.keys(),
                         pickle_doc.embedded._dynamic_fields.keys())
    def test_picklable_on_signals(self):
        """Smoke test: save/delete on a document whose signal handlers
        pickle it must not raise (no assertions — success is no error)."""
        pickle_doc = PickleSignalsTest(
            number=1, string="One", lists=['1', '2'])
        pickle_doc.embedded = PickleEmbedded()
        pickle_doc.save()
        pickle_doc.delete()
    def test_throw_invalid_document_error(self):
        """Ensure defining a field named after a reserved Document method
        ('validate') raises InvalidDocumentError at class creation."""
        def throw_invalid_document_error():
            class Blog(Document):
                # Shadows Document.validate — must be rejected.
                validate = DictField()
        self.assertRaises(InvalidDocumentError, throw_invalid_document_error)
    def test_mutating_documents(self):
        """Ensure documents saved under an older schema can be loaded and
        mutated after the embedded document class gains a new nested field."""
        class B(EmbeddedDocument):
            field1 = StringField(default='field1')
        class A(Document):
            b = EmbeddedDocumentField(B, default=lambda: B())
        A.drop_collection()
        a = A()
        a.save()
        a.reload()
        self.assertEqual(a.b.field1, 'field1')
        # Redefine B with an extra nested embedded field (schema change).
        class C(EmbeddedDocument):
            c_field = StringField(default='cfield')
        class B(EmbeddedDocument):
            field1 = StringField(default='field1')
            field2 = EmbeddedDocumentField(C, default=lambda: C())
        class A(Document):
            b = EmbeddedDocumentField(B, default=lambda: B())
        # The pre-existing document loads under the new schema and the new
        # nested field can be set and persisted.
        a = A.objects()[0]
        a.b.field2.c_field = 'new value'
        a.save()
        a.reload()
        self.assertEqual(a.b.field2.c_field, 'new value')
    def test_can_save_false_values(self):
        """Ensures you can save False values on save (falsy values must not
        be dropped from the update)."""
        class Doc(Document):
            foo = StringField()
            archived = BooleanField(default=False, required=True)
        Doc.drop_collection()
        d = Doc()
        d.save()
        # Explicitly assigning False must still be persisted and queryable.
        d.archived = False
        d.save()
        self.assertEqual(Doc.objects(archived=False).count(), 1)
    def test_can_save_false_values_dynamic(self):
        """Ensures you can save False values on dynamic docs (the dynamic
        field 'archived' is created on assignment)."""
        class Doc(DynamicDocument):
            foo = StringField()
        Doc.drop_collection()
        d = Doc()
        d.save()
        # 'archived' is not declared — it becomes a dynamic field here.
        d.archived = False
        d.save()
        self.assertEqual(Doc.objects(archived=False).count(), 1)
    def test_do_not_save_unchanged_references(self):
        """Ensures cascading saves don't auto update unchanged referenced
        documents (no update query should be issued for Job)."""
        class Job(Document):
            name = StringField()
        class Person(Document):
            name = StringField()
            age = IntField()
            job = ReferenceField(Job)
        Job.drop_collection()
        Person.drop_collection()
        job = Job(name="Job 1")
        # job should not have any changed fields after the save
        job.save()
        person = Person(name="name", age=10, job=job)
        # Monkeypatch pymongo's Collection.update so ANY update call fails
        # the test; person.save() inserts (not updates), so only an
        # unwanted cascade to Job could trigger it.
        from pymongo.collection import Collection
        orig_update = Collection.update
        try:
            def fake_update(*args, **kwargs):
                self.fail("Unexpected update for %s" % args[0].name)
                return orig_update(*args, **kwargs)
            Collection.update = fake_update
            person.save()
        finally:
            # Always restore the real method, even if the test fails.
            Collection.update = orig_update
    def test_db_alias_tests(self):
        """ DB Alias tests: documents with different db_alias values read
        and write to their own databases, and cross-database references
        dereference correctly. """
        # mongoenginetest - Is default connection alias from setUp()
        # Register Aliases
        register_connection('testdb-1', 'mongoenginetest2')
        register_connection('testdb-2', 'mongoenginetest3')
        register_connection('testdb-3', 'mongoenginetest4')
        class User(Document):
            name = StringField()
            meta = {"db_alias": "testdb-1"}
        class Book(Document):
            name = StringField()
            meta = {"db_alias": "testdb-2"}
        # Drops
        User.drop_collection()
        Book.drop_collection()
        # Create
        bob = User.objects.create(name="Bob")
        hp = Book.objects.create(name="Harry Potter")
        # Selects
        self.assertEqual(User.objects.first(), bob)
        self.assertEqual(Book.objects.first(), hp)
        # DeReference: AuthorBooks lives in a third database and references
        # documents from the other two.
        class AuthorBooks(Document):
            author = ReferenceField(User)
            book = ReferenceField(Book)
            meta = {"db_alias": "testdb-3"}
        # Drops
        AuthorBooks.drop_collection()
        ab = AuthorBooks.objects.create(author=bob, book=hp)
        # select
        self.assertEqual(AuthorBooks.objects.first(), ab)
        self.assertEqual(AuthorBooks.objects.first().book, hp)
        self.assertEqual(AuthorBooks.objects.first().author, bob)
        self.assertEqual(AuthorBooks.objects.filter(author=bob).first(), ab)
        self.assertEqual(AuthorBooks.objects.filter(book=hp).first(), ab)
        # DB Alias: each class resolves to the database its alias names.
        self.assertEqual(User._get_db(), get_db("testdb-1"))
        self.assertEqual(Book._get_db(), get_db("testdb-2"))
        self.assertEqual(AuthorBooks._get_db(), get_db("testdb-3"))
        # Collections
        self.assertEqual(
            User._get_collection(),
            get_db("testdb-1")[User._get_collection_name()])
        self.assertEqual(
            Book._get_collection(),
            get_db("testdb-2")[Book._get_collection_name()])
        self.assertEqual(
            AuthorBooks._get_collection(),
            get_db("testdb-3")[AuthorBooks._get_collection_name()])
    def test_db_alias_overrides(self):
        """db_alias can be overridden by a subclass, redirecting it to a
        different database than its parent.
        """
        # Register a connection with db_alias testdb-2
        register_connection('testdb-2', 'mongoenginetest2')
        class A(Document):
            """Uses default db_alias
            """
            name = StringField()
            meta = {"allow_inheritance": True}
        class B(A):
            """Uses testdb-2 db_alias
            """
            meta = {"db_alias": "testdb-2"}
        A.objects.all()
        self.assertEqual('testdb-2', B._meta.get('db_alias'))
        # Parent stays on the default database; subclass moves.
        self.assertEqual('mongoenginetest',
                         A._get_collection().database.name)
        self.assertEqual('mongoenginetest2',
                         B._get_collection().database.name)
    def test_db_alias_propagates(self):
        """db_alias propagates from a parent document class to subclasses
        that don't override it.
        """
        register_connection('testdb-1', 'mongoenginetest2')
        class A(Document):
            name = StringField()
            meta = {"db_alias": "testdb-1", "allow_inheritance": True}
        class B(A):
            pass
        # B declares no meta, so it inherits A's alias.
        self.assertEqual('testdb-1', B._meta.get('db_alias'))
    def test_db_ref_usage(self):
        """ DB Ref usage in dict_fields: documents stored as DBRefs inside
        a DictField must be queryable by document, by dbref, and with
        __all / __raw__ operators. """
        class User(Document):
            name = StringField()
        class Book(Document):
            name = StringField()
            author = ReferenceField(User)
            extra = DictField()
            meta = {
                'ordering': ['+name']
            }
            def __unicode__(self):
                return self.name
            def __str__(self):
                return self.name
        # Drops
        User.drop_collection()
        Book.drop_collection()
        # Authors
        bob = User.objects.create(name="Bob")
        jon = User.objects.create(name="Jon")
        # Redactors
        karl = User.objects.create(name="Karl")
        susan = User.objects.create(name="Susan")
        peter = User.objects.create(name="Peter")
        # Bob: books carry raw DBRefs inside the 'extra' DictField.
        Book.objects.create(name="1", author=bob, extra={
            "a": bob.to_dbref(), "b": [karl.to_dbref(), susan.to_dbref()]})
        Book.objects.create(name="2", author=bob, extra={
            "a": bob.to_dbref(), "b": karl.to_dbref()})
        Book.objects.create(name="3", author=bob, extra={
            "a": bob.to_dbref(), "c": [jon.to_dbref(), peter.to_dbref()]})
        Book.objects.create(name="4", author=bob)
        # Jon
        Book.objects.create(name="5", author=jon)
        Book.objects.create(name="6", author=peter)
        Book.objects.create(name="7", author=jon)
        Book.objects.create(name="8", author=jon)
        Book.objects.create(name="9", author=jon,
                            extra={"a": peter.to_dbref()})
        # Checks: 'ordering' meta sorts all books by name.
        self.assertEqual(",".join([str(b) for b in Book.objects.all()]),
                         "1,2,3,4,5,6,7,8,9")
        # bob related books: querying dict entries with a Document instance
        # must match the stored DBRefs.
        self.assertEqual(",".join([str(b) for b in Book.objects.filter(
                                   Q(extra__a=bob) |
                                   Q(author=bob) |
                                   Q(extra__b=bob))]),
                         "1,2,3,4")
        # Susan & Karl related books (__all with explicit dbrefs)
        self.assertEqual(",".join([str(b) for b in Book.objects.filter(
                                   Q(extra__a__all=[karl, susan]) |
                                   Q(author__all=[karl, susan]) |
                                   Q(extra__b__all=[
                                       karl.to_dbref(), susan.to_dbref()]))
                                   ]), "1")
        # $where raw JavaScript queries still work alongside Q objects.
        self.assertEqual(u",".join([str(b) for b in Book.objects.filter(
                                    __raw__={
                                        "$where": """
                                            function(){
                                                return this.name == '1' ||
                                                       this.name == '2';}"""
                                    })]),
                         "1,2")
    def test_switch_db_instance(self):
        """Ensure an individual document instance can be re-pointed at a
        different database with switch_db() for save, update and delete,
        without affecting the copy in the default database."""
        register_connection('testdb-1', 'mongoenginetest2')
        class Group(Document):
            name = StringField()
        Group.drop_collection()
        with switch_db(Group, 'testdb-1') as Group:
            Group.drop_collection()
        Group(name="hello - default").save()
        self.assertEqual(1, Group.objects.count())
        group = Group.objects.first()
        # Instance-level switch: save() now targets testdb-1.
        group.switch_db('testdb-1')
        group.name = "hello - testdb!"
        group.save()
        with switch_db(Group, 'testdb-1') as Group:
            group = Group.objects.first()
            self.assertEqual("hello - testdb!", group.name)
        # The default-db copy is untouched.
        group = Group.objects.first()
        self.assertEqual("hello - default", group.name)
        # Slightly contrived now - perform an update
        # Only works as they have the same object_id
        group.switch_db('testdb-1')
        group.update(set__name="hello - update")
        with switch_db(Group, 'testdb-1') as Group:
            group = Group.objects.first()
            self.assertEqual("hello - update", group.name)
            Group.drop_collection()
            self.assertEqual(0, Group.objects.count())
        group = Group.objects.first()
        self.assertEqual("hello - default", group.name)
        # Totally contrived now - perform a delete
        # Only works as they have the same object_id
        group.switch_db('testdb-1')
        group.delete()
        with switch_db(Group, 'testdb-1') as Group:
            self.assertEqual(0, Group.objects.count())
        # Default-db document still exists after the testdb-1 delete.
        group = Group.objects.first()
        self.assertEqual("hello - default", group.name)
    def test_load_undefined_fields(self):
        """Loading a raw document containing fields not defined on the
        class raises FieldDoesNotExist (strict mode is the default)."""
        class User(Document):
            name = StringField()
        User.drop_collection()
        # Insert via raw pymongo to bypass mongoengine validation.
        User._get_collection().save({
            'name': 'John',
            'foo': 'Bar',
            'data': [1, 2, 3]
        })
        self.assertRaises(FieldDoesNotExist, User.objects.first)
def test_load_undefined_fields_with_strict_false(self):
    """With meta {'strict': False}, unknown keys are tolerated: they are
    kept in _data but not exposed as attributes."""
    class User(Document):
        name = StringField()

        meta = {'strict': False}

    User.drop_collection()
    # Bypass the document layer and insert extra keys directly.
    User._get_collection().save({
        'name': 'John',
        'foo': 'Bar',
        'data': [1, 2, 3]
    })

    user = User.objects.first()
    self.assertEqual(user.name, 'John')
    # Unknown keys survive in _data only.
    self.assertFalse(hasattr(user, 'foo'))
    self.assertEqual(user._data['foo'], 'Bar')
    self.assertFalse(hasattr(user, 'data'))
    self.assertEqual(user._data['data'], [1, 2, 3])
def test_load_undefined_fields_on_embedded_document(self):
    """Unknown keys inside an embedded document also trigger
    FieldDoesNotExist when the embedded document is strict (default)."""
    class Thing(EmbeddedDocument):
        name = StringField()

    class User(Document):
        name = StringField()
        thing = EmbeddedDocumentField(Thing)

    User.drop_collection()
    # Raw insert with undeclared keys nested inside the embedded doc.
    User._get_collection().save({
        'name': 'John',
        'thing': {
            'name': 'My thing',
            'foo': 'Bar',
            'data': [1, 2, 3]
        }
    })

    self.assertRaises(FieldDoesNotExist, User.objects.first)
def test_load_undefined_fields_on_embedded_document_with_strict_false_on_doc(self):
    """strict=False on the parent Document does NOT relax strictness of
    the embedded document, so loading still raises FieldDoesNotExist."""
    class Thing(EmbeddedDocument):
        name = StringField()

    class User(Document):
        name = StringField()
        thing = EmbeddedDocumentField(Thing)

        meta = {'strict': False}

    User.drop_collection()
    # The unknown keys live inside the (still strict) embedded document.
    User._get_collection().save({
        'name': 'John',
        'thing': {
            'name': 'My thing',
            'foo': 'Bar',
            'data': [1, 2, 3]
        }
    })

    self.assertRaises(FieldDoesNotExist, User.objects.first)
def test_load_undefined_fields_on_embedded_document_with_strict_false(self):
    """strict=False on the EmbeddedDocument itself allows unknown keys,
    which are kept in the embedded _data but not exposed as attributes."""
    class Thing(EmbeddedDocument):
        name = StringField()

        meta = {'strict': False}

    class User(Document):
        name = StringField()
        thing = EmbeddedDocumentField(Thing)

    User.drop_collection()
    User._get_collection().save({
        'name': 'John',
        'thing': {
            'name': 'My thing',
            'foo': 'Bar',
            'data': [1, 2, 3]
        }
    })

    user = User.objects.first()
    self.assertEqual(user.name, 'John')
    self.assertEqual(user.thing.name, 'My thing')
    # Unknown keys survive in the embedded document's _data only.
    self.assertFalse(hasattr(user.thing, 'foo'))
    self.assertEqual(user.thing._data['foo'], 'Bar')
    self.assertFalse(hasattr(user.thing, 'data'))
    self.assertEqual(user.thing._data['data'], [1, 2, 3])
def test_spaces_in_keys(self):
    """Dynamic documents must accept attribute names containing spaces
    and allow querying on them."""
    class Embedded(DynamicEmbeddedDocument):
        pass

    class Doc(DynamicDocument):
        pass

    Doc.drop_collection()
    doc = Doc()
    # An attribute name with a space can only be set via setattr.
    setattr(doc, 'hello world', 1)
    doc.save()
    one = Doc.objects.filter(**{'hello world': 1}).count()
    self.assertEqual(1, one)
def test_shard_key(self):
    """Fields listed in meta['shard_key'] become immutable once the
    document is saved; reassigning one raises OperationError."""
    class LogEntry(Document):
        machine = StringField()
        log = StringField()

        meta = {
            'shard_key': ('machine',)
        }

    LogEntry.drop_collection()

    log = LogEntry()
    log.machine = "Localhost"
    log.save()

    self.assertTrue(log.id is not None)

    # Non-shard-key fields remain mutable after save.
    log.log = "Saving"
    log.save()

    def change_shard_key():
        log.machine = "127.0.0.1"

    self.assertRaises(OperationError, change_shard_key)
def test_shard_key_in_embedded_document(self):
    """A shard key referencing an embedded field ('foo.foo') is immutable
    after the first save; changing it and saving raises OperationError."""
    class Foo(EmbeddedDocument):
        foo = StringField()

    class Bar(Document):
        meta = {
            'shard_key': ('foo.foo',)
        }
        foo = EmbeddedDocumentField(Foo)
        bar = StringField()

    foo_doc = Foo(foo='hello')
    bar_doc = Bar(foo=foo_doc, bar='world')
    bar_doc.save()
    self.assertTrue(bar_doc.id is not None)

    # Non-shard-key fields remain mutable.
    bar_doc.bar = 'baz'
    bar_doc.save()

    def change_shard_key():
        bar_doc.foo.foo = 'something'
        bar_doc.save()

    self.assertRaises(OperationError, change_shard_key)
def test_shard_key_primary(self):
    """Shard-key immutability must also hold when the shard key field is
    the primary key."""
    class LogEntry(Document):
        machine = StringField(primary_key=True)
        log = StringField()

        meta = {
            'shard_key': ('machine',)
        }

    LogEntry.drop_collection()

    log = LogEntry()
    log.machine = "Localhost"
    log.save()

    self.assertTrue(log.id is not None)

    # Non-shard-key fields remain mutable after save.
    log.log = "Saving"
    log.save()

    def change_shard_key():
        log.machine = "127.0.0.1"

    self.assertRaises(OperationError, change_shard_key)
def test_kwargs_simple(self):
    """A document built from a plain dict (kwargs) must equal one built
    from an explicit embedded-document instance."""
    class Embedded(EmbeddedDocument):
        name = StringField()

    class Doc(Document):
        doc_name = StringField()
        doc = EmbeddedDocumentField(Embedded)

        def __eq__(self, other):
            return (self.doc_name == other.doc_name and
                    self.doc == other.doc)

    classic_doc = Doc(doc_name="my doc", doc=Embedded(name="embedded doc"))
    # The embedded document is given as a raw dict here.
    dict_doc = Doc(**{"doc_name": "my doc",
                      "doc": {"name": "embedded doc"}})

    self.assertEqual(classic_doc, dict_doc)
    self.assertEqual(classic_doc._data, dict_doc._data)
def test_kwargs_complex(self):
    """A document whose embedded-document list is given as raw dicts must
    equal one built from explicit embedded instances."""
    class Embedded(EmbeddedDocument):
        name = StringField()

    class Doc(Document):
        doc_name = StringField()
        docs = ListField(EmbeddedDocumentField(Embedded))

        def __eq__(self, other):
            return (self.doc_name == other.doc_name and
                    self.docs == other.docs)

    classic_doc = Doc(doc_name="my doc", docs=[
        Embedded(name="embedded doc1"),
        Embedded(name="embedded doc2")])
    # Same content, but the list items are raw dicts.
    dict_doc = Doc(**{"doc_name": "my doc",
                      "docs": [{"name": "embedded doc1"},
                               {"name": "embedded doc2"}]})

    self.assertEqual(classic_doc, dict_doc)
    self.assertEqual(classic_doc._data, dict_doc._data)
def test_positional_creation(self):
    """A document may be created using positional arguments, which map
    onto fields in declaration order (name, then age)."""
    person = self.Person("Test User", 42)
    self.assertEqual(person.name, "Test User")
    self.assertEqual(person.age, 42)
def test_mixed_creation(self):
    """A document may be created mixing positional and keyword
    arguments."""
    person = self.Person("Test User", age=42)
    self.assertEqual(person.name, "Test User")
    self.assertEqual(person.age, 42)
def test_mixed_creation_dynamic(self):
    """A dynamic document may be created mixing positional and keyword
    arguments; undeclared keywords become dynamic fields."""
    class Person(DynamicDocument):
        name = StringField()

    # 'age' is not declared; DynamicDocument accepts it as a keyword.
    person = Person("Test User", age=42)
    self.assertEqual(person.name, "Test User")
    self.assertEqual(person.age, 42)
def test_bad_mixed_creation(self):
    """Supplying the same field both positionally and as a keyword must
    raise TypeError."""
    def construct_bad_instance():
        # 'name' is given positionally ("Test User") and as a keyword.
        return self.Person("Test User", 42, name="Bad User")

    self.assertRaises(TypeError, construct_bad_instance)
def test_data_contains_id_field(self):
    """The _data dict of a loaded document must contain the 'id' key with
    the document's id."""
    class Person(Document):
        name = StringField()

    Person.drop_collection()
    Person(name="Harry Potter").save()

    person = Person.objects.first()
    self.assertTrue('id' in person._data.keys())
    self.assertEqual(person._data.get('id'), person.id)
def test_complex_nesting_document_and_embedded_document(self):
    """Saving a document whose MapField-of-references nodes are expanded
    during save() must persist the values written by expand()."""

    class Macro(EmbeddedDocument):
        value = DynamicField(default="UNDEFINED")

    class Parameter(EmbeddedDocument):
        macros = MapField(EmbeddedDocumentField(Macro))

        def expand(self):
            self.macros["test"] = Macro()

    class Node(Document):
        parameters = MapField(EmbeddedDocumentField(Parameter))

        def expand(self):
            self.flattened_parameter = {}
            for parameter_name, parameter in self.parameters.iteritems():
                parameter.expand()

    class NodesSystem(Document):
        name = StringField(required=True)
        nodes = MapField(ReferenceField(Node, dbref=False))

        def save(self, *args, **kwargs):
            # Expand and persist every referenced node before saving the
            # system document itself.
            for node_name, node in self.nodes.iteritems():
                node.expand()
                node.save(*args, **kwargs)
            super(NodesSystem, self).save(*args, **kwargs)

    NodesSystem.drop_collection()
    Node.drop_collection()

    system = NodesSystem(name="system")
    system.nodes["node"] = Node()
    system.save()
    system.nodes["node"].parameters["param"] = Parameter()
    system.save()

    # The Macro created by Parameter.expand() during save must round-trip.
    system = NodesSystem.objects.first()
    self.assertEqual(
        "UNDEFINED",
        system.nodes["node"].parameters["param"].macros["test"].value)
def test_embedded_document_equality(self):
    """Two embedded documents deserialized from the same SON must stay
    equal, even after one lazily dereferences its ReferenceField."""
    class Test(Document):
        field = StringField(required=True)

    class Embedded(EmbeddedDocument):
        ref = ReferenceField(Test)

    Test.drop_collection()
    test = Test(field='123').save()  # has id

    e = Embedded(ref=test)
    f1 = Embedded._from_son(e.to_mongo())
    f2 = Embedded._from_son(e.to_mongo())

    self.assertEqual(f1, f2)
    f1.ref  # Dereferences lazily
    # Equality must hold even though f1 now holds the dereferenced object.
    self.assertEqual(f1, f2)
def test_dbref_equality(self):
    """DBRef vs. document comparisons: equal when collection and id
    match, never equal across collections even with identical ids."""
    class Test2(Document):
        name = StringField()

    class Test3(Document):
        name = StringField()

    class Test(Document):
        name = StringField()
        test2 = ReferenceField('Test2')
        test3 = ReferenceField('Test3')

    Test.drop_collection()
    Test2.drop_collection()
    Test3.drop_collection()

    t2 = Test2(name='a')
    t2.save()

    # t3 deliberately shares t2's id but lives in another collection.
    t3 = Test3(name='x')
    t3.id = t2.id
    t3.save()

    t = Test(name='b', test2=t2, test3=t3)
    f = Test._from_son(t.to_mongo())

    # Raw _data holds DBRefs; attribute access dereferences to documents.
    dbref2 = f._data['test2']
    obj2 = f.test2
    self.assertTrue(isinstance(dbref2, DBRef))
    self.assertTrue(isinstance(obj2, Test2))
    self.assertTrue(obj2.id == dbref2.id)
    self.assertTrue(obj2 == dbref2)
    self.assertTrue(dbref2 == obj2)

    dbref3 = f._data['test3']
    obj3 = f.test3
    self.assertTrue(isinstance(dbref3, DBRef))
    self.assertTrue(isinstance(obj3, Test3))
    self.assertTrue(obj3.id == dbref3.id)
    self.assertTrue(obj3 == dbref3)
    self.assertTrue(dbref3 == obj3)

    # Identical ids ...
    self.assertTrue(obj2.id == obj3.id)
    self.assertTrue(dbref2.id == dbref3.id)
    # ... but different collections, so never equal in any direction.
    self.assertFalse(dbref2 == dbref3)
    self.assertFalse(dbref3 == dbref2)
    self.assertTrue(dbref2 != dbref3)
    self.assertTrue(dbref3 != dbref2)

    self.assertFalse(obj2 == dbref3)
    self.assertFalse(dbref3 == obj2)
    self.assertTrue(obj2 != dbref3)
    self.assertTrue(dbref3 != obj2)

    self.assertFalse(obj3 == dbref2)
    self.assertFalse(dbref2 == obj3)
    self.assertTrue(obj3 != dbref2)
    self.assertTrue(dbref2 != obj3)
def test_default_values(self):
    """A partial reload (.only()) followed by save() must not overwrite
    an unloaded field with its default; new field defaults apply to
    existing documents on load."""
    class Person(Document):
        # Pass the callable directly instead of wrapping it in a lambda.
        created_on = DateTimeField(default=datetime.utcnow)
        name = StringField()

    p = Person(name='alon')
    p.save()
    orig_created_on = Person.objects().only('created_on')[0].created_on

    # Re-save a partially loaded document: created_on was not loaded and
    # must keep its stored value rather than being reset to the default.
    p2 = Person.objects().only('name')[0]
    p2.name = 'alon2'
    p2.save()
    p3 = Person.objects().only('created_on')[0]
    self.assertEqual(orig_created_on, p3.created_on)

    # Redefine the model with an extra defaulted field; existing
    # documents pick it up on load.
    class Person(Document):
        created_on = DateTimeField(default=datetime.utcnow)
        name = StringField()
        height = IntField(default=189)

    p4 = Person.objects()[0]
    p4.save()
    self.assertEqual(p4.height, 189)

    # However the default will not be fixed in DB
    self.assertEqual(Person.objects(height=189).count(), 1)
def test_from_son(self):
    """Regression for #771: documents created via from_json/_from_son
    with created=True must count as new, so shard keys stay assignable."""
    class MyPerson(self.Person):
        meta = dict(shard_key=["id"])

    p = MyPerson.from_json('{"name": "name", "age": 27}', created=True)
    self.assertEqual(p.id, None)
    # Would raise "OperationError: Shard Keys are immutable..." if broken.
    p.id = "12345"

    p = MyPerson._from_son({"name": "name", "age": 27}, created=True)
    self.assertEqual(p.id, None)
    # Would raise "OperationError: Shard Keys are immutable..." if broken.
    p.id = "12345"
def test_null_field(self):
    """Regression for #734/#735/#864: fields declared with null=True must
    accept and persist an explicit None value."""
    class User(Document):
        name = StringField()
        height = IntField(default=184, null=True)
        str_fld = StringField(null=True)
        int_fld = IntField(null=True)
        flt_fld = FloatField(null=True)
        dt_fld = DateTimeField(null=True)
        cdt_fld = ComplexDateTimeField(null=True)

    User.objects.delete()
    u = User(name='user')
    u.save()
    u_from_db = User.objects.get(name='user')
    # 734: explicitly setting a nullable field to None must round-trip.
    u_from_db.height = None
    u_from_db.save()
    self.assertEqual(u_from_db.height, None)
    # 864: nullable fields without a default load back as None.
    self.assertEqual(u_from_db.str_fld, None)
    self.assertEqual(u_from_db.int_fld, None)
    self.assertEqual(u_from_db.flt_fld, None)
    self.assertEqual(u_from_db.dt_fld, None)
    self.assertEqual(u_from_db.cdt_fld, None)

    # 735: clearing the field through update_one must also work.
    User.objects.delete()
    u = User(name='user')
    u.save()
    User.objects(name='user').update_one(set__height=None, upsert=True)
    u_from_db = User.objects.get(name='user')
    self.assertEqual(u_from_db.height, None)
def test_not_saved_eq(self):
    """Unsaved documents (no id) compare unequal to each other, but a
    document always equals itself."""
    class Person(Document):
        pass

    p = Person()
    p1 = Person()
    # Two distinct unsaved instances are not equal ...
    self.assertNotEqual(p, p1)
    # ... but identity equality still holds.
    self.assertEqual(p, p)
def test_list_iter(self):
    """Regression for #914: iterating an embedded-document list must keep
    each item's _instance bound to the owning document."""
    class B(EmbeddedDocument):
        v = StringField()

    class A(Document):
        l = ListField(EmbeddedDocumentField(B))

    A.objects.delete()
    A(l=[B(v='1'), B(v='2'), B(v='3')]).save()
    a = A.objects.get()
    self.assertEqual(a.l._instance, a)
    for idx, b in enumerate(a.l):
        self.assertEqual(b._instance, a)
    # The loop must have visited all three items.
    self.assertEqual(idx, 2)
if __name__ == '__main__':
    # Allow running this test module directly.
    unittest.main()
/*
* Copyright 2002-present the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.context.index;
import java.util.Collections;
import java.util.LinkedHashSet;
import java.util.List;
import java.util.Properties;
import java.util.Set;
import java.util.stream.Collectors;
import org.springframework.util.AntPathMatcher;
import org.springframework.util.ClassUtils;
import org.springframework.util.LinkedMultiValueMap;
import org.springframework.util.MultiValueMap;
/**
* Provide access to the candidates that are defined in {@code META-INF/spring.components}
* component index files (see {@link #CandidateComponentsIndex(List)}) or registered
* programmatically (see {@link #CandidateComponentsIndex()}).
*
* <p>An arbitrary number of stereotypes can be registered (and queried) on the index: a
* typical example is the fully qualified name of an annotation that flags the class for
* a certain use case. The following call returns all the {@code @Component}
* <b>candidate</b> types for the {@code com.example} package (and its sub-packages):
* <pre class="code">
* Set<String> candidates = index.getCandidateTypes(
* "com.example", "org.springframework.stereotype.Component");
* </pre>
*
* <p>The {@code type} is usually the fully qualified name of a class, though this is
* not a rule. Similarly, the {@code stereotype} is usually the fully qualified name of
* an annotation type, but it can be any marker really.
*
* @author Stephane Nicoll
* @author Juergen Hoeller
* @since 5.0
*/
public class CandidateComponentsIndex {
private static final AntPathMatcher pathMatcher = new AntPathMatcher(".");
private final Set<String> registeredScans = new LinkedHashSet<>();
private final MultiValueMap<String, Entry> index = new LinkedMultiValueMap<>();
private final boolean complete;
/**
* Create a new index instance from parsed component index files.
*/
CandidateComponentsIndex(List<Properties> content) {
for (Properties entry : content) {
entry.forEach((type, values) -> {
String[] stereotypes = ((String) values).split(",");
for (String stereotype : stereotypes) {
this.index.add(stereotype, new Entry((String) type));
}
});
}
this.complete = true;
}
/**
* Create a new index instance for programmatic population.
* @since 7.0
* @see #registerScan(String...)
* @see #registerCandidateType(String, String...)
*/
public CandidateComponentsIndex() {
this.complete = false;
}
/**
* Programmatically register the given base packages (or base package patterns)
* as scanned.
* @since 7.0
* @see #registerCandidateType(String, String...)
*/
public void registerScan(String... basePackages) {
Collections.addAll(this.registeredScans, basePackages);
}
/**
* Return the registered base packages (or base package patterns).
* @since 7.0
* @see #registerScan(String...)
*/
public Set<String> getRegisteredScans() {
return this.registeredScans;
}
/**
* Determine whether this index contains an entry for the given base package
* (or base package pattern).
* @since 7.0
*/
public boolean hasScannedPackage(String packageName) {
return (this.complete ||
this.registeredScans.stream().anyMatch(basePackage -> matchPackage(basePackage, packageName)));
}
/**
* Programmatically register one or more stereotypes for the given candidate type.
* <p>Note that the containing packages for candidates are not automatically
* considered scanned packages. Make sure to call {@link #registerScan(String...)}
* with the scan-specific base package accordingly.
* @since 7.0
* @see #registerScan(String...)
*/
public void registerCandidateType(String type, String... stereotypes) {
for (String stereotype : stereotypes) {
this.index.add(stereotype, new Entry(type));
}
}
/**
* Return the registered stereotype packages (or base package patterns).
* @since 7.0
*/
public Set<String> getRegisteredStereotypes() {
return this.index.keySet();
}
/**
* Return the candidate types that are associated with the specified stereotype.
* @param basePackage the package to check for candidates
* @param stereotype the stereotype to use
* @return the candidate types associated with the specified {@code stereotype}
* or an empty set if none has been found for the specified {@code basePackage}
*/
public Set<String> getCandidateTypes(String basePackage, String stereotype) {
List<Entry> candidates = this.index.get(stereotype);
if (candidates != null) {
return candidates.stream()
.filter(entry -> entry.match(basePackage))
.map(entry -> entry.type)
.collect(Collectors.toSet());
}
return Collections.emptySet();
}
private static boolean matchPackage(String basePackage, String packageName) {
if (pathMatcher.isPattern(basePackage)) {
return pathMatcher.match(basePackage, packageName);
}
else {
return packageName.equals(basePackage) || packageName.startsWith(basePackage + ".");
}
}
private static class Entry {
final String type;
private final String packageName;
Entry(String type) {
this.type = type;
this.packageName = ClassUtils.getPackageName(type);
}
public boolean match(String basePackage) {
return matchPackage(basePackage, this.packageName);
}
}
} | java | github | https://github.com/spring-projects/spring-framework | spring-context/src/main/java/org/springframework/context/index/CandidateComponentsIndex.java |
/* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_CORE_KERNELS_SPARSE_MAT_MUL_OP_H_
#define TENSORFLOW_CORE_KERNELS_SPARSE_MAT_MUL_OP_H_
#define EIGEN_USE_THREADS
#if GOOGLE_CUDA || TENSORFLOW_USE_ROCM
#define EIGEN_USE_GPU
#endif
#include "Eigen/Core" // from @eigen_archive
#include "Eigen/SparseCore" // from @eigen_archive
#include "unsupported/Eigen/CXX11/Tensor" // from @eigen_archive
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/tensor_types.h"
#include "tensorflow/core/framework/type_traits.h"
#include "tensorflow/core/framework/variant_op_registry.h"
#include "tensorflow/core/kernels/cwise_ops_common.h"
#include "tensorflow/core/kernels/dense_update_functor.h"
#include "tensorflow/core/kernels/fill_functor.h"
#include "tensorflow/core/kernels/sparse/kernels.h"
#include "tensorflow/core/kernels/sparse/sparse_matrix.h"
#include "tensorflow/core/kernels/sparse/transpose_op.h"
#include "tensorflow/core/kernels/transpose_functor.h"
#include "tensorflow/core/lib/gtl/inlined_vector.h"
#include "tensorflow/core/platform/threadpool.h"
#if GOOGLE_CUDA || TENSORFLOW_USE_ROCM
#include "tensorflow/core/util/cuda_sparse.h"
#include "tensorflow/core/util/gpu_solvers.h"
#endif
namespace tensorflow {
// TODO(anudhyan): These constants may be tuned based on the performance of
// 'benchmark_sparse_matrix_mat_vec_mul'. We would like to find constants
// which work across hardware platforms for typical matrix sizes. It should be
// possible to observe at least 30-50% improvement as we increase the number
// of threads by 1. If not, then it may we worth increasing kMaxShards and
// kNumShardsPerThread. However, once we have too many shards, latency may be
// dominated by per-shard overhead.
//
// Maximum number of shards into which to divide the computation for each CSR
// Sparse Matrix instance.
static constexpr int32_t kMaxShards = 20;
// Number of shards allocated to each thread.
static constexpr int32_t kNumShardsPerThread = 3;
typedef Eigen::ThreadPoolDevice CPUDevice;
typedef Eigen::GpuDevice GPUDevice;
// Abstract OpKernel to compute sparse-dense matrix multiplication.
//
// Implements a kernel which, given a SparseMatrix `a` and dense Tensor `b`,
// computes a dense Tensor `c` satisfying `c = a * b` where * denotes matrix
// multiplication.
//
// The boolean attributes `transpose_a` and `adjoint_a` will transpose or
// adjoint `a` before multiplication, respectively. At most one of these
// attributes must be set to True. Corresponding attributes will transpose or
// adjoint `b` or the output (after multiplication).
//
// The rank of both `a` and `b` must be equal and their shapes must be
// compatible for matrix multiplication. Otherwise, InvalidArgument runtime
// errors will be thrown. Only rank 2 or rank 3 inputs are supported.
//
template <typename Device, typename T>
class CSRMatMulOp : public OpKernel {
 public:
  explicit CSRMatMulOp(OpKernelConstruction* c) : OpKernel(c) {
    OP_REQUIRES_OK(c, c->GetAttr("transpose_a", &transpose_a_));
    OP_REQUIRES_OK(c, c->GetAttr("transpose_b", &transpose_b_));
    bool adjoint_a;
    OP_REQUIRES_OK(c, c->GetAttr("adjoint_a", &adjoint_a));
    OP_REQUIRES(c, !(adjoint_a && transpose_a_),
                absl::InvalidArgumentError(
                    "Only one of adjoint_a and transpose_a may be true."));
    bool adjoint_b;
    OP_REQUIRES_OK(c, c->GetAttr("adjoint_b", &adjoint_b));
    OP_REQUIRES(c, !(adjoint_b && transpose_b_),
                absl::InvalidArgumentError(
                    "Only one of adjoint_b and transpose_b may be true."));
    OP_REQUIRES_OK(c, c->GetAttr("transpose_output", &transpose_output_));
    OP_REQUIRES_OK(c, c->GetAttr("conjugate_output", &conjugate_output_));
    // Fold adjoint into transpose + conjugate so the rest of the kernel
    // only needs to honor the transpose_/conjugate_ flags.
    transpose_a_ |= adjoint_a;
    transpose_b_ |= adjoint_b;
    if (is_complex<T>::value) {
      conjugate_a_ = adjoint_a;
      conjugate_b_ = adjoint_b;
    } else {
      // Conjugation is a no-op for real types.
      conjugate_a_ = false;
      conjugate_b_ = false;
    }
  }

  ~CSRMatMulOp() override {}

  // Checks that `sparse_matrix_a` and `dense_tensor_b` have matching dtypes,
  // equal ranks, equal batch sizes, and compatible inner matmul dimensions
  // (taking the transpose flags into account). On success, fills in `rank`
  // and `batch_size`; otherwise returns InvalidArgument.
  absl::Status ValidateInputs(const CSRSparseMatrix& sparse_matrix_a,
                              const Tensor& dense_tensor_b, int* rank,
                              int64_t* batch_size) {
    if (sparse_matrix_a.dtype() != dense_tensor_b.dtype()) {
      return absl::InvalidArgumentError(absl::StrCat(
          "Input types don't match. a.dtype == ",
          DataTypeString(sparse_matrix_a.dtype()),
          " vs. b.dtype == ", DataTypeString(dense_tensor_b.dtype())));
    }
    *rank = sparse_matrix_a.dims();
    // TODO(ebrevdo): Add support for broadcasting matmul.
    if (*rank != dense_tensor_b.dims()) {
      return absl::InvalidArgumentError(
          absl::StrCat("Ranks of a and b must match, saw: ", *rank, " vs. ",
                       dense_tensor_b.dims(), "."));
    }
    // A valid CSR SparseMatrix has rank 2 or rank 3.
    *batch_size = (*rank == 2) ? 1 : dense_tensor_b.dim_size(0);
    if (sparse_matrix_a.batch_size() != *batch_size) {
      return absl::InvalidArgumentError(absl::StrCat(
          "Batch sizes of a and b must match, saw: ",
          sparse_matrix_a.batch_size(), " vs. ", *batch_size, "."));
    }
    const auto& a_dense_shape = sparse_matrix_a.dense_shape().vec<int64_t>();
    // The "inner" dimension is the one contracted by the matmul; which axis
    // that is depends on the transpose flags.
    const int64_t a_inner_dim =
        a_dense_shape(this->transpose_a_ ? *rank - 2 : *rank - 1);
    const int64_t b_inner_dim =
        dense_tensor_b.dim_size(this->transpose_b_ ? *rank - 1 : *rank - 2);
    if (a_inner_dim != b_inner_dim) {
      return absl::InvalidArgumentError(
          absl::StrCat("Inner product dimensions of A and B do not agree. ",
                       "Shapes are: ", TensorShape(a_dense_shape).DebugString(),
                       " vs. ", dense_tensor_b.shape().DebugString()));
    }
    return absl::OkStatus();
  }

 public:
  bool transpose_a_;
  bool transpose_b_;
  bool conjugate_a_;
  bool conjugate_b_;
  bool transpose_output_;
  bool conjugate_output_;
};
// CPU Kernel to compute sparse-dense matrix multiplication.
//
// Uses Eigen SparseMatrix to compute the sparse-dense multiplication between
// a CSR SparseMatrix `a` and dense Tensor `b`. If intra-op parallelism is
// available, the implementation parallelizes the computation across each row
// of the sparse matrix.
template <typename T>
class CSRMatMulCPUOp : public CSRMatMulOp<CPUDevice, T> {
using SparseMatrix = Eigen::SparseMatrix<T, Eigen::RowMajor>;
using Matrix =
Eigen::Matrix<T, Eigen::Dynamic, Eigen::Dynamic, Eigen::RowMajor>;
using ConstMatrixMap = Eigen::Map<const Matrix>;
using MatrixMap = Eigen::Map<Matrix>;
public:
explicit CSRMatMulCPUOp(OpKernelConstruction* c)
: CSRMatMulOp<CPUDevice, T>(c) {}
~CSRMatMulCPUOp() override {}
  // Validates the inputs, normalizes the transpose_b case by materializing a
  // transposed copy of the dense RHS, dispatches to the (non-)transposed-LHS
  // matmul implementation, and finally transposes/conjugates the output if
  // requested.
  void Compute(OpKernelContext* ctx) final {
    const CSRSparseMatrix* sparse_matrix_a;
    OP_REQUIRES_OK(ctx, ExtractVariantFromInput(ctx, 0, &sparse_matrix_a));
    const Tensor& matrix_b = ctx->input(1);

    int rank;
    int64_t batch_size;
    OP_REQUIRES_OK(ctx, this->ValidateInputs(*sparse_matrix_a, matrix_b, &rank,
                                             &batch_size));

    const auto dense_shape = sparse_matrix_a->dense_shape().vec<int64_t>();
    int64_t num_lhs_rows = dense_shape(rank - 2);
    int64_t num_lhs_cols = dense_shape(rank - 1);
    int64_t num_rhs_rows = matrix_b.dim_size(rank - 2);
    int64_t num_rhs_cols = matrix_b.dim_size(rank - 1);

    // The LHS is never materialized transposed; only its logical dimensions
    // are swapped here.
    if (this->transpose_a_) {
      std::swap(num_lhs_rows, num_lhs_cols);
    }

    // Possibly transpose the dense Tensor b.
    const Tensor* rhs = &matrix_b;
    Tensor b_transposed;
    if (this->transpose_b_) {
      OP_REQUIRES_OK(
          ctx, TransposeAndConjugateTensor(ctx, matrix_b, this->conjugate_b_,
                                           &b_transposed));
      rhs = &b_transposed;
      std::swap(num_rhs_rows, num_rhs_cols);
    }

    // If we're transposing the output, then allocate a temporary buffer to
    // store the output. Otherwise allocate the output directly.
    Tensor* output = nullptr;
    Tensor* matmul_result = nullptr;
    Tensor output_transposed;
    OP_REQUIRES_OK(
        ctx, AllocateOutput(ctx, rank, batch_size, num_lhs_rows, num_rhs_cols,
                            this->transpose_output_, &output,
                            &output_transposed, &matmul_result));

    if (!this->transpose_a_) {
      SparseDenseMatMulWithoutTransposedLHS(
          ctx, batch_size, num_lhs_rows, *sparse_matrix_a, *rhs, matmul_result);
    } else {  // transpose_a_ == true
      SparseDenseMatMulWithTransposedLHS(ctx, batch_size, num_lhs_rows,
                                         num_lhs_cols, *sparse_matrix_a, *rhs,
                                         matmul_result);
    }

    // Transpose (and conjugate) the output if necessary.
    // Note that conjugate is only true if transpose is also true.
    if (this->transpose_output_) {
      OP_REQUIRES_OK(
          ctx, TransposeAndConjugateAllocatedTensor(
                   ctx, output_transposed, this->conjugate_output_, output));
    } else if (this->conjugate_output_) {
      functor::maybe_conj_inplace<CPUDevice, T>::run(
          ctx->eigen_device<CPUDevice>(), output);
    }
  }
private:
  // Allocates the output with the appropriate shape. Additionally, if
  // transpose_output is True, allocates a temporary buffer with the transposed
  // output. 'matmul_result' points to either output or output_transposed, based
  // on whether transpose_output is True.
  absl::Status AllocateOutput(OpKernelContext* ctx, const int32_t rank,
                              const int64_t batch_size, const int64_t num_rows,
                              const int64_t num_cols,
                              const bool transpose_output, Tensor** output,
                              Tensor* output_transposed,
                              Tensor** matmul_result) {
    TensorShape output_shape;
    // Rank-3 inputs carry a leading batch dimension.
    if (rank == 3) {
      TF_RETURN_IF_ERROR(output_shape.AddDimWithStatus(batch_size));
    }

    if (!transpose_output) {
      output_shape.AppendShape({num_rows, num_cols});
      TF_RETURN_IF_ERROR(ctx->allocate_output(0, output_shape, output));
      *matmul_result = *output;
    } else {
      // The matmul writes untransposed into `output_transposed`
      // ([..., num_rows, num_cols]); the op output gets the swapped shape
      // [..., num_cols, num_rows] and is filled by the caller's transpose.
      TensorShape output_transposed_shape = output_shape;
      output_transposed_shape.AppendShape({num_rows, num_cols});
      output_shape.AppendShape({num_cols, num_rows});
      TF_RETURN_IF_ERROR(ctx->allocate_temp(DataTypeToEnum<T>::value,
                                            output_transposed_shape,
                                            output_transposed));
      TF_RETURN_IF_ERROR(ctx->allocate_output(0, output_shape, output));
      *matmul_result = output_transposed;
    }
    return absl::OkStatus();
  }
  // Returns an Eigen::Ref expression of a sparse sub-matrix from the given
  // contiguous segment of rows of the CSR Sparse Matrix.
  // NOTE: the returned Map aliases both `row_ptrs` and the CSR matrix's
  // internal buffers (no copy); `row_ptrs` must outlive the returned Ref.
  Eigen::Ref<const SparseMatrix> GetSparseMatrixRef(
      const CSRSparseMatrix& csr_matrix, const int batch_index,
      const int64_t row_begin, const int64_t num_shard_rows,
      std::vector<int32_t>* row_ptrs) {
    // Compute the row pointers of the sparse sub-matrix.
    row_ptrs->resize(num_shard_rows + 1);
    const int64_t row_offset =
        csr_matrix.row_pointers_vec(batch_index)(row_begin);
    // Rebase the row pointers so the sub-matrix's first nonzero is at 0.
    for (int64_t row_idx = 0; row_idx <= num_shard_rows; ++row_idx) {
      row_ptrs->at(row_idx) =
          csr_matrix.row_pointers_vec(batch_index)(row_begin + row_idx) -
          row_offset;
    }
    const int64_t num_cols =
        csr_matrix.dense_shape().vec<int64_t>()(csr_matrix.dims() - 1);
    return Eigen::Map<const SparseMatrix>(
        num_shard_rows /* num_rows */, num_cols /* num_cols */,
        row_ptrs->at(num_shard_rows) /* total_nnz */, row_ptrs->data(),
        csr_matrix.col_indices_vec(batch_index).data() + row_offset,
        csr_matrix.values_vec<T>(batch_index).data() + row_offset);
  }
  // Sparse-Dense Matrix Multiplication between a CSRSparseMatrix (LHS) and a
  // dense Tensor (RHS). Each shard multiplies a contiguous row range of the
  // sparse LHS against the whole RHS and writes disjoint output rows, so no
  // cross-shard synchronization is needed.
  void SparseDenseMatMulWithoutTransposedLHS(OpKernelContext* ctx,
                                             const int64_t batch_size,
                                             const int64_t num_lhs_rows,
                                             const CSRSparseMatrix& lhs,
                                             const Tensor& rhs,
                                             Tensor* output) {
    // Parallelize matrix multiplication across batch dimensions and across
    // rows in each batch.
    auto worker_threads = *(ctx->device()->tensorflow_cpu_worker_threads());
    const int32_t num_threads = worker_threads.num_threads;
    // See kMaxShards/kNumShardsPerThread above for how the shard size is
    // chosen.
    const int64_t block_size =
        num_lhs_rows / std::max(kMaxShards, kNumShardsPerThread * num_threads);
    const int64_t num_rhs_rows = rhs.dim_size(rhs.dims() - 2);
    const int64_t num_rhs_cols = rhs.dim_size(rhs.dims() - 1);
    worker_threads.workers->ParallelFor(
        batch_size * num_lhs_rows /* total */,
        thread::ThreadPool::SchedulingParams(
            thread::ThreadPool::SchedulingStrategy::
                kFixedBlockSize /* strategy */,
            absl::nullopt /* cost_per_unit */, block_size),
        [&](int64_t batch_and_row_begin, int64_t batch_and_row_end) {
          HandleBatchAndRowRange(
              num_lhs_rows, batch_and_row_begin, batch_and_row_end,
              [&](int64_t batch_idx, int64_t row_begin, int64_t row_end) {
                const int64_t num_shard_rows = row_end - row_begin;

                // Define an Eigen::SparseMatrix over the row range:
                // [row_begin, row_end) of the CSR SparseMatrix A.
                std::vector<int32_t> row_ptrs;
                auto sparse_matrix = GetSparseMatrixRef(
                    lhs, batch_idx, row_begin, num_shard_rows, &row_ptrs);

                // Map the corresponding rows of the rhs.
                ConstMatrixMap rhs_map(rhs.flat<T>().data() + batch_idx *
                                                                  num_rhs_rows *
                                                                  num_rhs_cols,
                                       num_rhs_rows, num_rhs_cols);

                // Write to the corresponding rows of the output matrix.
                MatrixMap output_map(
                    output->flat<T>().data() +
                        batch_idx * num_lhs_rows * num_rhs_cols +
                        row_begin * num_rhs_cols,
                    num_shard_rows, num_rhs_cols);
                output_map.noalias() = sparse_matrix * rhs_map;
              });
        });
  }
  // Sparse-Dense Matrix Multiplication assuming the CSRSparseMatrix (LHS) is
  // to be transposed before the operation. Uses the identity
  // (A^T B) = (B^T A)^T so A never has to be materialized transposed; each
  // worker accumulates into a private buffer which is reduced at the end.
  void SparseDenseMatMulWithTransposedLHS(OpKernelContext* ctx,
                                          const int64_t batch_size,
                                          const int64_t num_lhs_rows,
                                          const int64_t num_lhs_cols,
                                          const CSRSparseMatrix& lhs,
                                          const Tensor& rhs, Tensor* output) {
    auto device = ctx->eigen_device<CPUDevice>();

    auto worker_threads = *(ctx->device()->tensorflow_cpu_worker_threads());
    const int32_t num_threads = worker_threads.num_threads;
    const int64_t num_rhs_rows = rhs.dim_size(rhs.dims() - 2);
    const int64_t num_rhs_cols = rhs.dim_size(rhs.dims() - 1);

    // Usually, we want to avoid transposing the sparse matrix A since it may be
    // an expensive operation. Instead, we use the identity (A^T B) = (B^T A)^T.
    // We don't actually transpose B or the output because it is more convenient
    // to have them in column major form.
    //
    // However, if A is hypersparse and B and C are huge, transposing A will be
    // cheaper. In the future, we should have a cost model estimating the cost
    // of transposing all matrices (A, B, C) to decide which variant to use.

    // Each thread writes to its own copy of the matrix product. These
    // `num_threads` copies are summed together to obtain the final result.
    // (The "+ 1" row accommodates worker id == num_threads, i.e. work run on
    // the calling thread.)
    Tensor matmul_result_buffer;
    OP_REQUIRES_OK(ctx, ctx->allocate_temp(DataTypeToEnum<T>::value,
                                           TensorShape({num_threads + 1,
                                                        output->NumElements()}),
                                           &matmul_result_buffer));
    functor::SetZeroFunctor<CPUDevice, T> set_zero;
    set_zero(device, matmul_result_buffer.flat<T>());

    // Parallelize matrix multiplication across batch dimensions and across
    // columns of A^T in each batch. These correspond to rows of A.
    const int64_t block_size =
        num_lhs_cols / std::max(kMaxShards, kNumShardsPerThread * num_threads);
    worker_threads.workers->ParallelForWithWorkerId(
        batch_size * num_lhs_cols /* total */,
        thread::ThreadPool::SchedulingParams(
            thread::ThreadPool::SchedulingStrategy::
                kFixedBlockSize /* strategy */,
            absl::nullopt /* cost_per_unit */, block_size),
        [&](int64_t batch_and_row_begin, int64_t batch_and_row_end, int tid) {
          HandleBatchAndRowRange(
              num_lhs_cols, batch_and_row_begin, batch_and_row_end,
              [&](int64_t batch_idx, int64_t row_begin, int64_t row_end) {
                const int64_t num_shard_rows = row_end - row_begin;

                // Define a new sparse sub-matrix from the row range
                // [row_begin, row_end) of the sparse matrix A.
                std::vector<int32_t> row_ptrs;
                auto sparse_matrix = GetSparseMatrixRef(
                    lhs, batch_idx, row_begin, num_shard_rows, &row_ptrs);

                // Map the corresponding `num_shard_rows` columns of B^T.
                // This is the same as taking the `num_shard_rows` rows of B.
                ConstMatrixMap b_dense_map(
                    rhs.flat<T>().data() +
                        batch_idx * num_rhs_rows * num_rhs_cols +
                        row_begin * num_rhs_cols,
                    num_shard_rows, num_rhs_cols);

                // Map to the corresponding rows of the output.
                MatrixMap output_map(
                    matmul_result_buffer.flat<T>().data() +
                        tid * batch_size * num_lhs_rows * num_rhs_cols +
                        batch_idx * num_lhs_rows * num_rhs_cols,
                    num_lhs_rows, num_rhs_cols);

                // Compute the product C^T = B^T * A; restricted to the row
                // range in the current shard.
                if (this->conjugate_a_) {
                  output_map.transpose().noalias() +=
                      b_dense_map.transpose() * sparse_matrix.conjugate();
                } else {
                  output_map.transpose().noalias() +=
                      b_dense_map.transpose() * sparse_matrix;
                }
              });
        });

    // Sum across each thread's matmul result.
    using Reducer = Eigen::internal::SumReducer<T>;
    using Index = typename TTypes<T>::Tensor::Index;
    output->flat<T>().device(device) = matmul_result_buffer.matrix<T>().reduce(
        Eigen::array<Index, 1>({0}), Reducer());
  }
// Given a range [batch_and_row_begin, batch_and_row_end) which is a
// contiguous subset of [0, num_rows * batch_size), calls the function
// fn(batch_idx, row_begin, row_end) for each batch index
// and the row range [row_begin, row_end) contained in the batch.
void HandleBatchAndRowRange(
const int64_t num_rows, const int64_t batch_and_row_begin,
const int64_t batch_and_row_end,
const std::function<void(int64_t, int64_t, int64_t)>& fn) {
// Obtain the batch indices overlapping with the current shard.
const int64_t batch_begin = batch_and_row_begin / num_rows;
const int64_t batch_end_inclusive = batch_and_row_end / num_rows;
for (int64_t batch_idx = batch_begin; batch_idx <= batch_end_inclusive;
++batch_idx) {
// Find the contiguous set of rows which are contained in this shard as
// well as the current batch. We intersect with interval [batch_idx *
// num_rows, (batch_idx + 1) * num_rows) which denotes the current batch.
const int64_t current_batch_row_begin =
std::max(batch_and_row_begin, batch_idx * num_rows);
const int64_t current_batch_row_end =
std::min(batch_and_row_end, (batch_idx + 1) * num_rows);
const int64_t row_begin = current_batch_row_begin % num_rows;
const int64_t num_shard_rows =
current_batch_row_end - current_batch_row_begin;
// Edge case for when current_batch_row_end is the first index of a new
// row.
if (num_shard_rows == 0) continue;
fn(batch_idx, row_begin, row_begin + num_shard_rows);
}
}
// Transposes (and optionally, conjugates) a given Tensor. Also allocates the
// required memory for the output Tensor.
absl::Status TransposeAndConjugateTensor(OpKernelContext* ctx,
const Tensor& input, bool conjugate,
Tensor* output) {
TensorShape transposed_shape = input.shape();
transposed_shape.set_dim(input.dims() - 1,
input.dim_size(input.dims() - 2));
transposed_shape.set_dim(input.dims() - 2,
input.dim_size(input.dims() - 1));
TF_RETURN_IF_ERROR(
ctx->allocate_temp(DataTypeToEnum<T>::value, transposed_shape, output));
return TransposeAndConjugateAllocatedTensor(ctx, input, conjugate, output);
}
// Transposes (and optionally, conjugates) a given Tensor. The output should
// be already allocated.
absl::Status TransposeAndConjugateAllocatedTensor(OpKernelContext* ctx,
const Tensor& input,
bool conjugate,
Tensor* output) {
if (conjugate) {
TF_RETURN_IF_ERROR(DoConjugateMatrixTranspose(
ctx->eigen_device<CPUDevice>(), input, output));
} else {
TF_RETURN_IF_ERROR(
DoMatrixTranspose(ctx->eigen_device<CPUDevice>(), input, output));
}
return absl::OkStatus();
}
};
// GPU Kernel to compute sparse-dense matrix multiplication.
//
// Computes C = op(A) * op(B) where A is a (possibly batched) CSRSparseMatrix
// and B is dense, honoring the transpose/conjugate attributes parsed by the
// CSRMatMulOp base class. Dispatches to a matrix-vector product when B has a
// single column, otherwise to the cuSPARSE/hipSPARSE matrix-matrix path.
template <typename T>
class CSRMatMulGPUOp : public CSRMatMulOp<GPUDevice, T> {
  using SparseMatrix = Eigen::SparseMatrix<T, Eigen::RowMajor>;
  using Matrix =
      Eigen::Matrix<T, Eigen::Dynamic, Eigen::Dynamic, Eigen::RowMajor>;
  using ConstMatrixMap = Eigen::Map<const Matrix>;
  using MatrixMap = Eigen::Map<Matrix>;
 public:
  explicit CSRMatMulGPUOp(OpKernelConstruction* c)
      : CSRMatMulOp<GPUDevice, T>(c) {}
  ~CSRMatMulGPUOp() override {}
  // Validates the inputs, allocates the output, then runs one sparse matmul
  // (or matvec) per batch component.
  void Compute(OpKernelContext* ctx) final {
    const CSRSparseMatrix* a_matrix;
    OP_REQUIRES_OK(ctx, ExtractVariantFromInput(ctx, 0, &a_matrix));
    const Tensor& b_t = ctx->input(1);
    int rank;
    int64_t batch_size;
    OP_REQUIRES_OK(ctx,
                   this->ValidateInputs(*a_matrix, b_t, &rank, &batch_size));
    const Tensor& a_dense_shape_t = a_matrix->dense_shape();
    TensorShape a_dense_tensor_shape;
    auto a_dense_shape = a_dense_shape_t.vec<int64_t>();
    OP_REQUIRES_OK(
        ctx, TensorShapeUtils::MakeShape(a_dense_shape, &a_dense_tensor_shape));
    // rank == 3 means a leading batch dimension; the matrix dims start at
    // row_dim.
    const int row_dim = (rank == 2) ? 0 : 1;
    const int64_t a_outer_dim = a_dense_tensor_shape.dim_size(
        this->transpose_a_ ? row_dim + 1 : row_dim);
    const int64_t b_inner_dim =
        b_t.shape().dim_size(this->transpose_b_ ? row_dim + 1 : row_dim);
    const int64_t b_outer_dim =
        b_t.dim_size(this->transpose_b_ ? row_dim : row_dim + 1);
    const int64_t b_slice_size = b_inner_dim * b_outer_dim;
    // Output shape: [batch,] (a_outer, b_outer), swapped when the caller
    // asked for a transposed output.
    TensorShape c_shape;
    if (rank == 3) {
      OP_REQUIRES_OK(ctx, c_shape.AddDimWithStatus(batch_size));
    }
    if (this->transpose_output_) {
      OP_REQUIRES_OK(ctx, c_shape.AddDimWithStatus(b_outer_dim));
      OP_REQUIRES_OK(ctx, c_shape.AddDimWithStatus(a_outer_dim));
    } else {
      OP_REQUIRES_OK(ctx, c_shape.AddDimWithStatus(a_outer_dim));
      OP_REQUIRES_OK(ctx, c_shape.AddDimWithStatus(b_outer_dim));
    }
    const int64_t c_matrix_lhs = c_shape.dim_size(row_dim);
    const int64_t c_matrix_rhs = c_shape.dim_size(row_dim + 1);
    const int64_t c_slice_size = c_matrix_lhs * c_matrix_rhs;
    Tensor* c_t;
    OP_REQUIRES_OK(ctx, ctx->allocate_output(0, c_shape, &c_t));
    const GPUDevice& d = ctx->eigen_device<GPUDevice>();
    bool use_matrix_vector_multiply = (b_outer_dim == 1);
#if TENSORFLOW_USE_ROCM
    // ROCm hipsparse does not implement csrmv with transposed input a
    use_matrix_vector_multiply =
        use_matrix_vector_multiply && !this->transpose_a_;
#endif
    if (use_matrix_vector_multiply) {
      // Call matrix-vector multiply if b is a vector.
      TTypes<int64_t>::ConstVec a_dense_shape_comp(
          a_dense_shape.data() + row_dim, 2);
      Tensor b_conj_t;
      const T* b_base_ptr = b_t.template flat<T>().data();
      bool conjugate_a = this->conjugate_a_;
      bool conjugate_output = this->conjugate_output_;
      if (this->conjugate_b_) {
        if (conjugate_a) {
          // In this case we can use the identity
          //   conj(a) * conj(b) = conj(a * b)
          // instead of creating a conjugated copy of b.
          conjugate_a = false;
          conjugate_output = !conjugate_output;
        } else {
          // Materialize conj(b) once, reused for every batch component.
          OP_REQUIRES_OK(
              ctx, ctx->forward_input_or_allocate_temp(
                       {1}, DataTypeToEnum<T>::value, b_t.shape(), &b_conj_t));
          functor::maybe_conj<GPUDevice, T>::run(d, b_t, &b_conj_t);
          b_base_ptr = b_conj_t.template flat<T>().data();
        }
      }
      functor::CSRSparseMatrixMatVec<GPUDevice, T> csr_spmv(this->transpose_a_,
                                                            conjugate_a);
      for (int i = 0; i < batch_size; ++i) {
        auto a_row_ptr = a_matrix->row_pointers_vec(i);
        auto a_col_ind = a_matrix->col_indices_vec(i);
        auto a_values = a_matrix->values_vec<T>(i);
        ConstCSRComponent<T> a_comp{a_row_ptr, a_col_ind, a_values,
                                    a_dense_shape_comp};
        const T* b_i = b_base_ptr + i * b_slice_size;
        T* c_i = &c_t->template flat<T>()(i * c_slice_size);
        absl::Status s = csr_spmv.Compute(ctx, a_comp, b_i, c_i);
        OP_REQUIRES_OK(ctx, s);
      }
      if (conjugate_output) {
        functor::maybe_conj_inplace<GPUDevice, T>::run(d, c_t);
      }
      return;
    }
    functor::CSRSparseMatrixMatMul<GPUDevice, T> csr_spmmadd(
        this->transpose_output_);
    Tensor c_mat_col_major_t;
    if (!this->transpose_output_) {
      // If transpose_output is false, we'll need to transpose the (col
      // major) output of the csrgemm call to get proper (row-major)
      // output. Which means we need to keep a temporary buffer to
      // store the intermediate gemm output.
      TensorShape c_mat_col_major_shape;
      if (rank == 2) {
        c_mat_col_major_shape = TensorShape({c_matrix_rhs, c_matrix_lhs});
      } else {
        c_mat_col_major_shape =
            TensorShape({batch_size, c_matrix_rhs, c_matrix_lhs});
      }
      OP_REQUIRES_OK(
          ctx, ctx->allocate_temp(DataTypeToEnum<T>::value,
                                  c_mat_col_major_shape, &c_mat_col_major_t));
    }
    // If transpose_output is true, return the direct (column-major i.e.,
    // transposed) output of the csrgemm call. Otherwise we'll need
    // to transpose it to row major format.
    auto c_mat_col_major = (this->transpose_output_)
                               ? c_t->flat<T>()
                               : c_mat_col_major_t.flat<T>();
    // Possibly transpose a.
    const CSRSparseMatrix* a_input_matrix;
    // If we need to transpose a, we will store the result temporarily
    // in the object below.
    CSRSparseMatrix a_matrix_transposed;
    if (!this->transpose_a_) {
      a_input_matrix = a_matrix;
    } else {
      functor::CSRSparseMatrixTranspose<GPUDevice, T> transpose;
      OP_REQUIRES_OK(ctx, transpose(ctx, this->conjugate_a_, *a_matrix,
                                    &a_matrix_transposed));
      a_input_matrix = &a_matrix_transposed;
    }
    auto a_input_dense_shape = a_input_matrix->dense_shape().vec<int64_t>();
    // Possibly transpose b.
    Tensor b_t_input;
    if (!this->transpose_b_) {
      b_t_input = b_t;
    } else {
      TensorShape b_t_transposed_shape;
      if (rank == 3) {
        OP_REQUIRES_OK(ctx, b_t_transposed_shape.AddDimWithStatus(batch_size));
      }
      OP_REQUIRES_OK(ctx, b_t_transposed_shape.AddDimWithStatus(
                              b_t.dim_size(row_dim + 1)));
      OP_REQUIRES_OK(
          ctx, b_t_transposed_shape.AddDimWithStatus(b_t.dim_size(row_dim)));
      OP_REQUIRES_OK(ctx, ctx->allocate_temp(DataTypeToEnum<T>::value,
                                             b_t_transposed_shape, &b_t_input));
      const GPUDevice& d = ctx->eigen_device<GPUDevice>();
      if (this->conjugate_b_) {
        OP_REQUIRES_OK(ctx, DoConjugateMatrixTranspose(d, b_t /*input*/,
                                                       &b_t_input /*output*/));
      } else {
        OP_REQUIRES_OK(
            ctx, DoMatrixTranspose(d, b_t /*input*/, &b_t_input /*output*/));
      }
    }
    // Dense shape of a batch component of A.
    TTypes<int64_t>::ConstVec a_input_dense_shape_comp(
        a_input_dense_shape.data() + row_dim, 2);
    auto b = b_t_input.flat<T>();
    // One cuSPARSE/hipSPARSE call per batch component.
    for (int i = 0; i < batch_size; ++i) {
      auto a_row_ptr = a_input_matrix->row_pointers_vec(i);
      auto a_col_ind = a_input_matrix->col_indices_vec(i);
      auto a_values = a_input_matrix->values_vec<T>(i);
      typename TTypes<T>::UnalignedConstMatrix b_i(b.data() + i * b_slice_size,
                                                   {b_inner_dim, b_outer_dim});
      typename TTypes<T>::UnalignedMatrix c_mat_col_major_i(
          c_mat_col_major.data() + i * c_slice_size,
          {c_matrix_lhs, c_matrix_rhs});
      ConstCSRComponent<T> a_comp{a_row_ptr, a_col_ind, a_values,
                                  a_input_dense_shape_comp};
      absl::Status s = csr_spmmadd.Compute(ctx, a_comp, b_i, c_mat_col_major_i);
      OP_REQUIRES_OK(ctx, s);
    }
    if (!this->transpose_output_) {
      // We need to return values in row major format, so transpose
      // the column-major values in c_mat_col_major_t to row-major output c_t.
      OP_REQUIRES_OK(ctx, DoMatrixTranspose(d, /*input=*/c_mat_col_major_t,
                                            /*output=*/c_t));
    }
    if (this->conjugate_output_) {
      functor::maybe_conj_inplace<GPUDevice, T>::run(d, c_t);
    }
  }
};
#if GOOGLE_CUDA || TENSORFLOW_USE_ROCM
namespace functor {
namespace gpu_data_type {
// GPUDataType<T>::type translates from a C++ type (e.g. float) to a
// GPUDataType_t (e.g. CUDA_R_32F).
//
// Only the types specialized below are supported; using any other T fails at
// compile time because the primary template is left undefined.
template <typename T>
struct GPUDataType;
// Half precision.
template <>
struct GPUDataType<Eigen::half> {
#if GOOGLE_CUDA
  static constexpr cudaDataType_t type = CUDA_R_16F;
#else
  static constexpr hipDataType type = HIP_R_16F;
#endif
};
// Single precision real.
template <>
struct GPUDataType<float> {
#if GOOGLE_CUDA
  static constexpr cudaDataType_t type = CUDA_R_32F;
#else
  static constexpr hipDataType type = HIP_R_32F;
#endif
};
// Single precision complex.
template <>
struct GPUDataType<std::complex<float>> {
#if GOOGLE_CUDA
  static constexpr cudaDataType_t type = CUDA_C_32F;
#else
  static constexpr hipDataType type = HIP_C_32F;
#endif
};
// Double precision real.
template <>
struct GPUDataType<double> {
#if GOOGLE_CUDA
  static constexpr cudaDataType_t type = CUDA_R_64F;
#else
  static constexpr hipDataType type = HIP_R_64F;
#endif
};
// Double precision complex.
template <>
struct GPUDataType<std::complex<double>> {
#if GOOGLE_CUDA
  static constexpr cudaDataType_t type = CUDA_C_64F;
#else
  static constexpr hipDataType type = HIP_C_64F;
#endif
};
}  // namespace gpu_data_type
// GPU functor that maps one batch component of a CSR sparse x dense matmul
// onto the cuSPARSE / hipSPARSE SpMM (or, on older toolkits, csrmm)
// primitive. The library works with column-major dense matrices, so the
// caller passes a column-major view of C (see transpose_output_ handling
// below).
template <typename T>
class CSRSparseMatrixMatMul<GPUDevice, T> {
 public:
  explicit CSRSparseMatrixMatMul(const bool transpose_output)
      : transpose_output_(transpose_output) {}
  // Computes c = a * b for a single batch component. `b` is row-major; `c`
  // receives the library's column-major result.
  absl::Status Compute(OpKernelContext* ctx, const ConstCSRComponent<T>& a,
                       typename TTypes<T>::UnalignedConstMatrix b,
                       typename TTypes<T>::UnalignedMatrix c) {
    GpuSparse cuda_sparse(ctx);
    TF_RETURN_IF_ERROR(cuda_sparse.Initialize());
    {
      // Use Csrmm/SpMM to calculate:
      //   C = alpha * op(A) * op(B) + beta * C
      // where alpha = 1.0, beta = 0.0, A is sparse and B and C are dense.
      // Note that Csrmm/Spmm assumes B and C are in column-major form; so we
      // use transB == true, and manually transpose the output in place
      // using blas<t>geam.
      // TODO(ebrevdo,rmlarsen): Add support for transposition and adjoint.
      // Create alpha and beta scalars; alpha = 1.0, beta = 0.0
      // TODO(ebrevdo,rmlarsen): Add support for non-trivial alpha and beta.
      const T alpha = 1;
      const T beta = 0;
      // A is (m, k), Bt is (ldb, k) and Ct is (ldc, n)
      const int k = b.dimension(0);
      DCHECK_EQ(k, a.dense_shape_host(1));
      // If transpose_output_ is true, then the c matrix we receive
      // here is the direct row major output (into which we will store
      // csrgemm's col major output). Otherwise it's a
      // temporary tensor that will store the column major output that
      // will eventually be transposed.
      const int m = c.dimension(transpose_output_ ? 1 : 0);
      const int n = c.dimension(transpose_output_ ? 0 : 1);
      DCHECK_EQ(m, a.dense_shape_host(0));
      DCHECK_EQ(n, b.dimension(1));
      const int nnz = a.values.size();
      DCHECK_EQ(nnz, a.col_ind.size());
      // ldb: leading dimension of B. If op(B)=B, it must be at least max(1, k)
      // if op(A) = A and at least max (1, m) otherwise. If op(B) != B, it must
      // be at least max(1, n).
      const int ldb = n;
      // ldc: leading dimension of C. It must be at least max(1, m) if
      // op(A) = A and at least max(1, k) otherwise.
      const int ldc = m;
      // transA must be non-transpose if transB is transpose (cusparse
      // limitation).
#if GOOGLE_CUDA
      const gpusparseOperation_t transA = CUSPARSE_OPERATION_NON_TRANSPOSE;
#elif TENSORFLOW_USE_ROCM
      const gpusparseOperation_t transA = HIPSPARSE_OPERATION_NON_TRANSPOSE;
#endif
      // transB: b is row-major, and cusparse requires col-major b (or
      // equivalently transB == transpose).  this version is actually more
      // efficient.
#if GOOGLE_CUDA && CUDA_VERSION >= 10020
      // Modern CUDA path: generic SpMM API with explicit descriptors and an
      // externally allocated workspace buffer.
      const gpusparseOperation_t transB = CUSPARSE_OPERATION_TRANSPOSE;
      gpusparseSpMatDescr_t matA;
      gpusparseDnMatDescr_t matB, matC;
      TF_RETURN_IF_GPUSPARSE_ERROR(cusparseCreateCsr(
          &matA, m, k, nnz, const_cast<int*>(a.row_ptr.data()),
          const_cast<int*>(a.col_ind.data()), const_cast<T*>(a.values.data()),
          CUSPARSE_INDEX_32I, CUSPARSE_INDEX_32I, CUSPARSE_INDEX_BASE_ZERO,
          gpu_data_type::GPUDataType<T>::type));
      TF_RETURN_IF_GPUSPARSE_ERROR(cusparseCreateDnMat(
          &matB, n, k, ldb, const_cast<T*>(b.data()),
          gpu_data_type::GPUDataType<T>::type, CUSPARSE_ORDER_COL));
      TF_RETURN_IF_GPUSPARSE_ERROR(cusparseCreateDnMat(
          &matC, m, n, ldc, c.data(), gpu_data_type::GPUDataType<T>::type,
          CUSPARSE_ORDER_COL));
#if CUDA_VERSION >= 12000
      cusparseSpMMAlg_t algo = CUSPARSE_SPMM_ALG_DEFAULT;
#else
      cusparseSpMMAlg_t algo = CUSPARSE_MM_ALG_DEFAULT;
#endif
      size_t bufferSize = 0;
      TF_RETURN_IF_ERROR(cuda_sparse.SpMMBufferSize(
          transA, transB, &alpha, matA, matB, &beta, matC, algo, &bufferSize));
      Tensor buffer;
      TF_RETURN_IF_ERROR(ctx->allocate_temp(
          DT_INT8, TensorShape({static_cast<int64_t>(bufferSize)}), &buffer));
      DCHECK(buffer.flat<int8_t>().data() != nullptr);
      TF_RETURN_IF_ERROR(cuda_sparse.SpMM(transA, transB, &alpha, matA, matB,
                                          &beta, matC, algo,
                                          buffer.flat<int8_t>().data()));
      TF_RETURN_IF_GPUSPARSE_ERROR(cusparseDestroyDnMat(matB));
      TF_RETURN_IF_GPUSPARSE_ERROR(cusparseDestroyDnMat(matC));
      TF_RETURN_IF_GPUSPARSE_ERROR(cusparseDestroySpMat(matA));
#elif TENSORFLOW_USE_ROCM && TF_ROCM_VERSION >= 40200
      // Use SPMM
      const gpusparseOperation_t transB = HIPSPARSE_OPERATION_TRANSPOSE;
      gpusparseSpMatDescr_t matA;
      gpusparseDnMatDescr_t matB, matC;
      TF_RETURN_IF_GPUSPARSE_ERROR(se::wrap::hipsparseCreateCsr(
          &matA, m, k, nnz, const_cast<int*>(a.row_ptr.data()),
          const_cast<int*>(a.col_ind.data()), const_cast<T*>(a.values.data()),
          HIPSPARSE_INDEX_32I, HIPSPARSE_INDEX_32I, HIPSPARSE_INDEX_BASE_ZERO,
          gpu_data_type::GPUDataType<T>::type));
      TF_RETURN_IF_GPUSPARSE_ERROR(se::wrap::hipsparseCreateDnMat(
          &matB, n, k, ldb, const_cast<T*>(b.data()),
          gpu_data_type::GPUDataType<T>::type, HIPSPARSE_ORDER_COLUMN));
      TF_RETURN_IF_GPUSPARSE_ERROR(se::wrap::hipsparseCreateDnMat(
          &matC, m, n, ldc, c.data(), gpu_data_type::GPUDataType<T>::type,
          HIPSPARSE_ORDER_COLUMN));
      size_t bufferSize = 0;
      TF_RETURN_IF_ERROR(cuda_sparse.SpMMBufferSize(
          transA, transB, &alpha, matA, matB, &beta, matC,
          HIPSPARSE_MM_ALG_DEFAULT, &bufferSize));
      Tensor buffer;
      TF_RETURN_IF_ERROR(ctx->allocate_temp(
          DT_INT8, TensorShape({static_cast<int64_t>(bufferSize)}), &buffer));
      DCHECK(buffer.flat<int8>().data() != nullptr);
      TF_RETURN_IF_ERROR(cuda_sparse.SpMM(transA, transB, &alpha, matA, matB,
                                          &beta, matC, HIPSPARSE_MM_ALG_DEFAULT,
                                          buffer.flat<int8>().data()));
      TF_RETURN_IF_GPUSPARSE_ERROR(se::wrap::hipsparseDestroyDnMat(matB));
      TF_RETURN_IF_GPUSPARSE_ERROR(se::wrap::hipsparseDestroyDnMat(matC));
      TF_RETURN_IF_GPUSPARSE_ERROR(se::wrap::hipsparseDestroySpMat(matA));
#else
      // Legacy path: csrmm with a classic matrix descriptor.
#if GOOGLE_CUDA
      const gpusparseOperation_t transB = CUSPARSE_OPERATION_TRANSPOSE;
      gpusparseMatDescr_t descrA;
      TF_RETURN_IF_GPUSPARSE_ERROR(cusparseCreateMatDescr(&descrA));
      TF_RETURN_IF_GPUSPARSE_ERROR(
          cusparseSetMatType(descrA, CUSPARSE_MATRIX_TYPE_GENERAL));
      TF_RETURN_IF_GPUSPARSE_ERROR(
          cusparseSetMatIndexBase(descrA, CUSPARSE_INDEX_BASE_ZERO));
#elif TENSORFLOW_USE_ROCM
      const gpusparseOperation_t transB = HIPSPARSE_OPERATION_TRANSPOSE;
      gpusparseMatDescr_t descrA;
      TF_RETURN_IF_GPUSPARSE_ERROR(se::wrap::hipsparseCreateMatDescr(&descrA));
      TF_RETURN_IF_GPUSPARSE_ERROR(
          se::wrap::hipsparseSetMatType(descrA, HIPSPARSE_MATRIX_TYPE_GENERAL));
      TF_RETURN_IF_GPUSPARSE_ERROR(se::wrap::hipsparseSetMatIndexBase(
          descrA, HIPSPARSE_INDEX_BASE_ZERO));
#endif  // GOOGLE_CUDA
      TF_RETURN_IF_ERROR(
          cuda_sparse.Csrmm(transA, transB, m, n, k, nnz, &alpha, descrA,
                            a.values.data(), a.row_ptr.data(), a.col_ind.data(),
                            b.data(), ldb, &beta, c.data(), ldc));
#endif  // GOOGLE_CUDA && CUDA_VERSION >= 10020
    }
    return absl::OkStatus();
  }
 private:
  // When true, `c` in Compute() is the final (row-major) output viewed as its
  // transpose; when false it is a temporary the caller transposes afterwards.
  bool transpose_output_;
};
// GPU functor computing the sparse matrix-vector product y = op(A) * x via
// the cuSPARSE / hipSPARSE csrmv primitive.
template <typename T>
class CSRSparseMatrixMatVec<GPUDevice, T> {
 public:
  // transpose_a / conjugate_a are translated to a single gpusparse operation
  // code; any translation error is stored in status_ and surfaced on the
  // first call to Compute().
  CSRSparseMatrixMatVec(bool transpose_a, bool conjugate_a)
      : transA_(TransposeAndConjugateToGpuSparseOp(transpose_a, conjugate_a,
                                                   &status_)) {}
  // Computes y = op(A) * x for a single batch component `a`. `x` and `y` are
  // dense device buffers.
  absl::Status Compute(OpKernelContext* ctx, const ConstCSRComponent<T>& a,
                       const T* x, T* y) {
    TF_RETURN_IF_ERROR(status_);
    GpuSparse cuda_sparse(ctx);
    TF_RETURN_IF_ERROR(cuda_sparse.Initialize());
    {
      // Use Csrmv to calculate:
      //   y = alpha * op(A) * x + beta * y
      // where alpha = 1.0, beta = 0.0, A is a sparse matrix and x and y are
      // dense vectors.
      // Create alpha and beta scalars; alpha = 1.0, beta = 0.0
      // TODO(rmlarsen,ebrevdo): Add support for general alpha, beta.
      const T alpha = 1;
      const T beta = 0;
#if GOOGLE_CUDA && CUDA_VERSION < 10020
      gpusparseMatDescr_t descrA;
      TF_RETURN_IF_GPUSPARSE_ERROR(cusparseCreateMatDescr(&descrA));
      TF_RETURN_IF_GPUSPARSE_ERROR(
          cusparseSetMatType(descrA, CUSPARSE_MATRIX_TYPE_GENERAL));
      TF_RETURN_IF_GPUSPARSE_ERROR(
          cusparseSetMatIndexBase(descrA, CUSPARSE_INDEX_BASE_ZERO));
#elif TENSORFLOW_USE_ROCM
      gpusparseMatDescr_t descrA;
      TF_RETURN_IF_GPUSPARSE_ERROR(se::wrap::hipsparseCreateMatDescr(&descrA));
      TF_RETURN_IF_GPUSPARSE_ERROR(
          se::wrap::hipsparseSetMatType(descrA, HIPSPARSE_MATRIX_TYPE_GENERAL));
      TF_RETURN_IF_GPUSPARSE_ERROR(se::wrap::hipsparseSetMatIndexBase(
          descrA, HIPSPARSE_INDEX_BASE_ZERO));
#endif  // GOOGLE_CUDA || TENSORFLOW_USE_ROCM
      const int m = a.dense_shape_host(0);
      const int n = a.dense_shape_host(1);
      const int nnz = a.values.size();
      DCHECK_EQ(nnz, a.col_ind.size());
      // Newer CUDA toolkits take no matrix descriptor; older CUDA and ROCm
      // pass descrA created above.
#if GOOGLE_CUDA && (CUDA_VERSION >= 10020)
      TF_RETURN_IF_ERROR(cuda_sparse.Csrmv(transA_, m, n, nnz, &alpha,
                                           a.values.data(), a.row_ptr.data(),
                                           a.col_ind.data(), x, &beta, y));
#else
      TF_RETURN_IF_ERROR(cuda_sparse.Csrmv(transA_, m, n, nnz, &alpha, descrA,
                                           a.values.data(), a.row_ptr.data(),
                                           a.col_ind.data(), x, &beta, y));
#endif
    }
    return absl::OkStatus();
  }
 private:
  // Error from translating the transpose/conjugate flags, if any.
  absl::Status status_;
  const gpusparseOperation_t transA_;
};
} // namespace functor
#endif // GOOGLE_CUDA || TENSORFLOW_USE_ROCM
} // namespace tensorflow
#endif // TENSORFLOW_CORE_KERNELS_SPARSE_MAT_MUL_OP_H_ | c | github | https://github.com/tensorflow/tensorflow | tensorflow/core/kernels/sparse/mat_mul_op.h |
"""
========================
Random Number Generation
========================
Use ``default_rng()`` to create a `Generator` and call its methods.
=============== =========================================================
Generator
--------------- ---------------------------------------------------------
Generator Class implementing all of the random number distributions
default_rng Default constructor for ``Generator``
=============== =========================================================
============================================= ===
BitGenerator Streams that work with Generator
--------------------------------------------- ---
MT19937
PCG64
PCG64DXSM
Philox
SFC64
============================================= ===
============================================= ===
Getting entropy to initialize a BitGenerator
--------------------------------------------- ---
SeedSequence
============================================= ===
Legacy
------
For backwards compatibility with previous versions of numpy before 1.17, the
various aliases to the global `RandomState` methods are left alone and do not
use the new `Generator` API.
==================== =========================================================
Utility functions
-------------------- ---------------------------------------------------------
random Uniformly distributed floats over ``[0, 1)``
bytes Uniformly distributed random bytes.
permutation Randomly permute a sequence / generate a random sequence.
shuffle Randomly permute a sequence in place.
choice Random sample from 1-D array.
==================== =========================================================
==================== =========================================================
Compatibility
functions - removed
in the new API
-------------------- ---------------------------------------------------------
rand Uniformly distributed values.
randn Normally distributed values.
ranf Uniformly distributed floating point numbers.
random_integers Uniformly distributed integers in a given range.
(deprecated, use ``integers(..., closed=True)`` instead)
random_sample        Uniformly distributed floats over ``[0, 1)``.
randint Uniformly distributed integers in a given range
seed Seed the legacy random number generator.
==================== =========================================================
==================== =========================================================
Univariate
distributions
-------------------- ---------------------------------------------------------
beta Beta distribution over ``[0, 1]``.
binomial Binomial distribution.
chisquare :math:`\\chi^2` distribution.
exponential Exponential distribution.
f F (Fisher-Snedecor) distribution.
gamma Gamma distribution.
geometric Geometric distribution.
gumbel Gumbel distribution.
hypergeometric Hypergeometric distribution.
laplace Laplace distribution.
logistic Logistic distribution.
lognormal Log-normal distribution.
logseries Logarithmic series distribution.
negative_binomial Negative binomial distribution.
noncentral_chisquare Non-central chi-square distribution.
noncentral_f Non-central F distribution.
normal Normal / Gaussian distribution.
pareto Pareto distribution.
poisson Poisson distribution.
power Power distribution.
rayleigh Rayleigh distribution.
triangular Triangular distribution.
uniform Uniform distribution.
vonmises Von Mises circular distribution.
wald Wald (inverse Gaussian) distribution.
weibull Weibull distribution.
zipf Zipf's distribution over ranked data.
==================== =========================================================
==================== ==========================================================
Multivariate
distributions
-------------------- ----------------------------------------------------------
dirichlet Multivariate generalization of Beta distribution.
multinomial Multivariate generalization of the binomial distribution.
multivariate_normal Multivariate generalization of the normal distribution.
==================== ==========================================================
==================== =========================================================
Standard
distributions
-------------------- ---------------------------------------------------------
standard_cauchy Standard Cauchy-Lorentz distribution.
standard_exponential Standard exponential distribution.
standard_gamma Standard Gamma distribution.
standard_normal Standard normal distribution.
standard_t Standard Student's t-distribution.
==================== =========================================================
==================== =========================================================
Internal functions
-------------------- ---------------------------------------------------------
get_state Get tuple representing internal state of generator.
set_state Set state of generator.
==================== =========================================================
"""
# Public names re-exported from the legacy RandomState-era interface
# (``numpy.random.mtrand``); extended further below with the new
# Generator/BitGenerator API names.
__all__ = [
    'beta',
    'binomial',
    'bytes',
    'chisquare',
    'choice',
    'dirichlet',
    'exponential',
    'f',
    'gamma',
    'geometric',
    'get_state',
    'gumbel',
    'hypergeometric',
    'laplace',
    'logistic',
    'lognormal',
    'logseries',
    'multinomial',
    'multivariate_normal',
    'negative_binomial',
    'noncentral_chisquare',
    'noncentral_f',
    'normal',
    'pareto',
    'permutation',
    'poisson',
    'power',
    'rand',
    'randint',
    'randn',
    'random',
    'random_integers',
    'random_sample',
    'ranf',
    'rayleigh',
    'sample',
    'seed',
    'set_state',
    'shuffle',
    'standard_cauchy',
    'standard_exponential',
    'standard_gamma',
    'standard_normal',
    'standard_t',
    'triangular',
    'uniform',
    'vonmises',
    'wald',
    'weibull',
    'zipf',
]
# add these for module-freeze analysis (like PyInstaller)
from . import _bounded_integers, _common, _pickle
from ._generator import Generator, default_rng
from ._mt19937 import MT19937
from ._pcg64 import PCG64, PCG64DXSM
from ._philox import Philox
from ._sfc64 import SFC64
from .bit_generator import BitGenerator, SeedSequence
from .mtrand import *
# Extend the legacy exports with the new-style Generator / BitGenerator API.
__all__ += ['Generator', 'RandomState', 'SeedSequence', 'MT19937',
            'Philox', 'PCG64', 'PCG64DXSM', 'SFC64', 'default_rng',
            'BitGenerator']
def __RandomState_ctor():
    """Create a throwaway RandomState to assist (un)pickling.

    Pickle only needs a freshly allocated instance whose internal state it
    can overwrite afterwards, so the deterministic ``seed=0`` used here is
    irrelevant to callers.

    See https://github.com/numpy/numpy/issues/4763 for a detailed discussion.
    """
    fresh_instance = RandomState(seed=0)
    return fresh_instance
# Expose the standard numpy test runner as ``numpy.random.test``; the helper
# class itself is removed from the module namespace afterwards.
from numpy._pytesttester import PytestTester
test = PytestTester(__name__)
del PytestTester
from typing import TYPE_CHECKING, Any
from langchain_classic._api import create_importer
if TYPE_CHECKING:
from langchain_community.agent_toolkits.spark_sql.toolkit import SparkSQLToolkit
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
    "SparkSQLToolkit": "langchain_community.agent_toolkits.spark_sql.toolkit",
}
# Module-level importer that resolves the deprecated names above lazily,
# emitting the appropriate deprecation warning on first access.
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
    """Resolve deprecated module attributes lazily via the shared importer."""
    attribute = _import_attribute(name)
    return attribute
# Names re-exported (lazily, via __getattr__) for backwards compatibility.
__all__ = [
    "SparkSQLToolkit",
]
//===--- TypeContextInfo.h --------------------------------------*- C++ -*-===//
//
// This source file is part of the Swift.org open source project
//
// Copyright (c) 2019 Apple Inc. and the Swift project authors
// Licensed under Apache License v2.0 with Runtime Library Exception
//
// See https://swift.org/LICENSE.txt for license information
// See https://swift.org/CONTRIBUTORS.txt for the list of Swift project authors
//
//===----------------------------------------------------------------------===//
#ifndef SWIFT_IDE_TYPECONTEXTINFO_H
#define SWIFT_IDE_TYPECONTEXTINFO_H
#include "swift/AST/Type.h"
#include "swift/Basic/LLVM.h"
namespace swift {
class IDEInspectionCallbacksFactory;
namespace ide {
/// A result item for context info query.
class TypeContextInfoItem {
public:
  /// Possible expected type.
  Type ExpectedTy;
  /// Members of \c ExpectedTy which can be referenced by "Implicit Member
  /// Expression".
  SmallVector<ValueDecl *, 0> ImplicitMembers;
  /// Creates an item for \p ExpectedTy; \c ImplicitMembers starts empty.
  TypeContextInfoItem(Type ExpectedTy) : ExpectedTy(ExpectedTy) {}
};
/// An abstract base class for consumers of context info results.
class TypeContextInfoConsumer {
public:
  virtual ~TypeContextInfoConsumer() {}
  /// Invoked with the collected result items once the query completes.
  virtual void handleResults(ArrayRef<TypeContextInfoItem>) = 0;
};
/// Create a factory for IDE inspection callbacks that collect type context
/// info results and forward them to \p Consumer.
IDEInspectionCallbacksFactory *
makeTypeContextInfoCallbacksFactory(TypeContextInfoConsumer &Consumer);
} // namespace ide
} // namespace swift
#endif // SWIFT_IDE_TYPECONTEXTINFO_H | c | github | https://github.com/apple/swift | include/swift/IDE/TypeContextInfo.h |
# -*- coding: utf-8 -*-
"""
Contains the crawling models. We use namedtuple for most models (easier to
pickle, lower footprint, indicates that it is immutable) and we use classes for
objects with mutable states and helper methods.
Classes with crawling logic are declared in the crawler module.
"""
from __future__ import unicode_literals, absolute_import
from collections import namedtuple
from optparse import OptionParser, OptionGroup
from pylinkchecker.compat import get_safe_str
from pylinkchecker.urlutil import get_clean_url_split
# Tag types whose URL attribute is checked by default.
DEFAULT_TYPES = ['a', 'img', 'script', 'link']
# Maps an HTML tag name to the attribute carrying its URL.
TYPE_ATTRIBUTES = {
    'a': 'href',
    'img': 'src',
    'script': 'src',
    'link': 'href',
}
# Default per-request timeout, in seconds.
DEFAULT_TIMEOUT = 10
# Concurrency modes for the crawler workers.
MODE_THREAD = "thread"
MODE_PROCESS = "process"
MODE_GREEN = "green"
# Default number of workers per mode; green threads are cheap so many more
# are used by default.
DEFAULT_WORKERS = {
    MODE_THREAD: 1,
    MODE_PROCESS: 1,
    MODE_GREEN: 1000,
}
# Supported HTML parser backends (BeautifulSoup parser names).
PARSER_STDLIB = "html.parser"
PARSER_LXML = "lxml"
PARSER_HTML5 = "html5lib"
# TODO Add support for gumbo. Will require some refactoring of the parsing
# logic.
# PARSER_GUMBO = "gumbo"
# Output report formats.
FORMAT_PLAIN = "plain"
FORMAT_HTML = "html"
FORMAT_JSON = "json"
# When a report should be produced.
WHEN_ALWAYS = "always"
WHEN_ON_ERROR = "error"
# How much of the crawl to include in the report.
REPORT_TYPE_ERRORS = "errors"
REPORT_TYPE_SUMMARY = "summary"
REPORT_TYPE_ALL = "all"
# Verbosity levels (kept as strings for easy CLI comparison).
VERBOSE_QUIET = "0"
VERBOSE_NORMAL = "1"
VERBOSE_INFO = "2"
HTML_MIME_TYPE = "text/html"
# Sentinel markers for a page's crawl state.
PAGE_QUEUED = '__PAGE_QUEUED__'
PAGE_CRAWLED = '__PAGE_CRAWLED__'
# Note: we use namedtuple to exchange data with workers because they are
# immutable and easy to pickle (as opposed to a class).
WorkerInit = namedtuple("WorkerInit", ["worker_config", "input_queue",
        "output_queue", "logger"])
WorkerConfig = namedtuple("WorkerConfig", ["username", "password", "types",
        "timeout", "parser", "strict_mode"])
WorkerInput = namedtuple("WorkerInput", ["url_split", "should_crawl"])
Response = namedtuple("Response", ["content", "status", "exception",
        "original_url", "final_url", "is_redirect", "is_timeout"])
ExceptionStr = namedtuple("ExceptionStr", ["type_name", "message"])
Link = namedtuple("Link", ["type", "url_split", "original_url_split",
        "source_str"])
PageCrawl = namedtuple("PageCrawl", ["original_url_split", "final_url_split",
        "status", "is_timeout", "is_redirect", "links", "exception", "is_html"])
PageStatus = namedtuple("PageStatus", ["status", "sources"])
PageSource = namedtuple("PageSource", ["origin", "origin_str"])
class UTF8Class(object):
    """Mixin that derives __str__() and __repr__() from a subclass's
    __unicode__(), encoding safely for the running Python version."""

    def _as_safe_str(self):
        # Single place where the unicode value is rendered and encoded.
        return get_safe_str(self.__unicode__())

    def __str__(self):
        return self._as_safe_str()

    def __repr__(self):
        return self._as_safe_str()
class LazyLogParam(object):
    """Lazily evaluated logging parameter.

    Wraps a zero-argument callable; the value is only computed when the
    parameter is actually rendered (i.e. when the log statement is printed).
    """

    def __init__(self, func):
        self.func = func

    def __str__(self):
        # Evaluate on demand, then render as text.
        return str(self.func())
class Config(UTF8Class):
    """Contains all the configuration options.

    Options can come either from the command line (parse_cli_config) or from
    an API call with a plain dict (parse_api_config). After parsing, the
    derived attributes (worker_config, accepted_hosts, ignored_prefixes,
    worker_size) are filled in by _parse_config().
    """

    def __init__(self):
        # Design note: we only use attributes when options need to be
        # transformed. Otherwise, we use options.
        self.parser = self._build_parser()
        self.options = None
        self.start_urls = []
        self.worker_config = None
        self.accepted_hosts = []
        self.ignored_prefixes = []
        self.worker_size = 0

    def should_crawl(self, url_split):
        """Returns True if url split is local AND run_once is False"""
        return not self.options.run_once and self.is_local(url_split)

    def is_local(self, url_split):
        """Returns true if url split is in the accepted hosts"""
        return url_split.netloc in self.accepted_hosts

    def should_download(self, url_split):
        """Returns True if the url does not start with an ignored prefix and if
        it is local or outside links are allowed."""
        local = self.is_local(url_split)
        if not self.options.test_outside and not local:
            return False
        url = url_split.geturl()
        for ignored_prefix in self.ignored_prefixes:
            if url.startswith(ignored_prefix):
                return False
        return True

    def parse_cli_config(self):
        """Builds the options and args based on the command line options."""
        (self.options, self.start_urls) = self.parser.parse_args()
        self._parse_config()

    def parse_api_config(self, start_urls, options_dict=None):
        """Builds the options and args based on passed parameters.

        Parameters:
            start_urls: list of URLs to crawl.
            options_dict: optional dict mapping option names (long option
                names without the leading dashes) to values.
        """
        # TODO Add options
        options = self._get_options(options_dict)
        (self.options, self.start_urls) = self.parser.parse_args(
            options + start_urls)
        self._parse_config()

    def _get_options(self, options_dict):
        """Translates an options dict into a list of CLI-style arguments.

        Boolean options are flags: a True value emits a bare ``--name`` and a
        False value is omitted entirely (the parser defaults already are
        False). Bug fix: previously a False boolean produced ``--name=False``,
        which optparse rejects for store_true options.
        """
        if not options_dict:
            options_dict = {}
        options = []
        for key, value in options_dict.items():
            if isinstance(value, bool):
                if value:
                    options.append("--{0}".format(key))
            else:
                options.append("--{0}={1}".format(key, value))
        return options

    def _parse_config(self):
        """Computes derived attributes from the parsed options."""
        self.worker_config = self._build_worker_config(self.options)
        self.accepted_hosts = self._build_accepted_hosts(self.options,
            self.start_urls)
        if self.options.ignored_prefixes:
            self.ignored_prefixes = self.options.ignored_prefixes.split(',')
        if self.options.workers:
            self.worker_size = self.options.workers
        else:
            # Fall back to the per-mode default pool size.
            self.worker_size = DEFAULT_WORKERS[self.options.mode]

    def _build_worker_config(self, options):
        """Builds the WorkerConfig namedtuple, validating element types."""
        types = options.types.split(',')
        for element_type in types:
            if element_type not in DEFAULT_TYPES:
                raise ValueError("This type is not supported: {0}"
                    .format(element_type))
        return WorkerConfig(options.username, options.password, types,
            options.timeout, options.parser, options.strict_mode)

    def _build_accepted_hosts(self, options, start_urls):
        """Returns the set of netlocs considered local.

        Note: reads self.options rather than the ``options`` parameter; both
        refer to the same parsed options when called from _parse_config.
        """
        hosts = set()
        urls = []
        if self.options.accepted_hosts:
            urls = self.options.accepted_hosts.split(',')
        urls = urls + start_urls
        for url in urls:
            split_result = get_clean_url_split(url)
            hosts.add(split_result.netloc)
        return hosts

    def _build_parser(self):
        """Builds the optparse parser with all supported options."""
        # avoid circular references
        import pylinkchecker
        version = pylinkchecker.__version__

        parser = OptionParser(usage="%prog [options] URL ...",
            version="%prog {0}".format(version))

        parser.add_option("-V", "--verbose", dest="verbose", action="store",
            default=VERBOSE_QUIET, choices=[VERBOSE_QUIET, VERBOSE_NORMAL,
            VERBOSE_INFO])

        crawler_group = OptionGroup(parser, "Crawler Options",
            "These options modify the way the crawler traverses the site.")
        crawler_group.add_option("-O", "--test-outside", dest="test_outside",
            action="store_true", default=False,
            help="fetch resources from other domains without crawling them")
        crawler_group.add_option("-H", "--accepted-hosts",
            dest="accepted_hosts", action="store", default=None,
            help="comma-separated list of additional hosts to crawl (e.g., "
            "example.com,subdomain.another.com)")
        crawler_group.add_option("-i", "--ignore", dest="ignored_prefixes",
            action="store", default=None,
            help="comma-separated list of host/path prefixes to ignore "
            "(e.g., www.example.com/ignore_this_and_after/)")
        crawler_group.add_option("-u", "--username", dest="username",
            action="store", default=None,
            help="username to use with basic HTTP authentication")
        crawler_group.add_option("-p", "--password", dest="password",
            action="store", default=None,
            help="password to use with basic HTTP authentication")
        # crawler_group.add_option("-U", "--unique", dest="unique",
        #     action="store_true", default=False)
        crawler_group.add_option("-t", "--types", dest="types", action="store",
            default=",".join(DEFAULT_TYPES),
            help="Comma-separated values of tags to look for when crawling"
            "a site. Default (and supported types): a,img,link,script")
        crawler_group.add_option("-T", "--timeout", dest="timeout",
            type="int", action="store", default=DEFAULT_TIMEOUT,
            help="Seconds to wait before considering that a page timed out")
        crawler_group.add_option("-C", "--strict", dest="strict_mode",
            action="store_true", default=False,
            help="Does not strip href and src attributes from whitespaces")
        crawler_group.add_option("-P", "--progress", dest="progress",
            action="store_true", default=False,
            help="Prints crawler progress in the console")
        crawler_group.add_option("-N", "--run-once", dest="run_once",
            action="store_true", default=False,
            help="Only crawl the first page.")
        # TODO Add follow redirect option.
        parser.add_option_group(crawler_group)

        perf_group = OptionGroup(parser, "Performance Options",
            "These options can impact the performance of the crawler.")
        perf_group.add_option("-w", "--workers", dest="workers", action="store",
            default=None, type="int",
            help="Number of workers to spawn")
        perf_group.add_option("-m", "--mode", dest="mode", action="store",
            default=MODE_THREAD, choices=[MODE_THREAD, MODE_PROCESS,
            MODE_GREEN],
            help="Types of workers: thread (default), process, or green")
        perf_group.add_option("-R", "--parser", dest="parser", action="store",
            default=PARSER_STDLIB, choices=[PARSER_STDLIB, PARSER_LXML,
            PARSER_HTML5],
            help="Types of HTML parse: html.parser (default), lxml, html5lib")
        parser.add_option_group(perf_group)

        output_group = OptionGroup(parser, "Output Options",
            "These options change the output of the crawler.")
        output_group.add_option("-f", "--format", dest="format", action="store",
            default=FORMAT_PLAIN, choices=[FORMAT_PLAIN],
            help="Format of the report: plain")
        output_group.add_option("-o", "--output", dest="output", action="store",
            default=None,
            help="Path of the file where the report will be printed.")
        output_group.add_option("-W", "--when", dest="when", action="store",
            default=WHEN_ALWAYS, choices=[WHEN_ALWAYS, WHEN_ON_ERROR],
            help="When to print the report. error (only if a "
            "crawling error occurs) or always (default)")
        output_group.add_option("-E", "--report-type", dest="report_type",
            action="store", default=REPORT_TYPE_ERRORS, choices=[
            REPORT_TYPE_ERRORS, REPORT_TYPE_SUMMARY, REPORT_TYPE_ALL],
            help="Type of report to print: errors (default, summary and "
            "erroneous links), summary, all (summary and all links)")
        output_group.add_option("-c", "--console", dest="console",
            action="store_true", default=False,
            help="Prints report to the console in addition to other output"
            " options such as file or email.")
        # Note: intentionally registered on crawler_group even though it is
        # declared next to the output options (preserved from the original).
        crawler_group.add_option("-S", "--show-source", dest="show_source",
            action="store_true", default=False,
            help="Show source of links (html) in the report.")
        parser.add_option_group(output_group)

        email_group = OptionGroup(parser, "Email Options",
            "These options allows the crawler to send a report by email.")
        email_group.add_option("-a", "--address", dest="address", action="store",
            default=None,
            help="Comma-separated list of email addresses used to send a "
            "report")
        email_group.add_option("--from", dest="from_address", action="store",
            default=None,
            help="Email address to use in the from field of the email "
            "(optional)")
        email_group.add_option("-s", "--smtp", dest="smtp", action="store",
            default=None,
            help="Host of the smtp server")
        email_group.add_option("--port", dest="port", action="store",
            default=25, type="int",
            help="Port of the smtp server (optional)")
        email_group.add_option("--tls", dest="tls", action="store_true",
            default=False,
            help="Use TLS with the email server.")
        email_group.add_option("--subject", dest="subject", action="store",
            default=None,
            help="Subject of the email (optional)")
        email_group.add_option("--smtp-username", dest="smtp_username",
            action="store", default=None,
            help="Username to use with the smtp server (optional)")
        email_group.add_option("--smtp-password", dest="smtp_password",
            action="store", default=None,
            help="Password to use with the smtp server (optional)")
        parser.add_option_group(email_group)

        return parser

    def __unicode__(self):
        return "Configuration - Start URLs: {0} - Options: {1}".format(
            self.start_urls, self.options)
class SitePage(UTF8Class):
    """Contains the crawling result for a page.

    This is a class because we need to keep track of the various sources
    linking to this page and it must be modified as the crawl progresses.
    """

    def __init__(self, url_split, status=200, is_timeout=False, exception=None,
            is_html=True, is_local=True):
        self.url_split = url_split
        self.original_source = None
        self.sources = []
        # Tag type of the element that linked to this page (e.g., 'a', 'img'),
        # filled in later by the crawler. Bug fix: this used to read
        # ``self.type = type``, which assigned the *builtin* ``type``
        # function (a leftover from a removed parameter).
        self.type = None
        self.status = status
        self.is_timeout = is_timeout
        self.exception = exception
        self.is_html = is_html
        self.is_local = is_local
        # A page is OK when it has a status code below 400 (no error).
        self.is_ok = status and status < 400

    def add_sources(self, page_sources):
        """Records additional PageSource entries linking to this page."""
        self.sources.extend(page_sources)

    def get_status_message(self):
        """Returns a human-readable description of the crawl outcome."""
        if self.status:
            if self.status < 400:
                return "ok ({0})".format(self.status)
            elif self.status == 404:
                return "not found (404)"
            else:
                return "error (status={0})".format(self.status)
        elif self.is_timeout:
            return "error (timeout)"
        elif self.exception:
            # self.exception is an ExceptionStr namedtuple.
            return "error ({0}): {1}".format(self.exception.type_name,
                self.exception.message)
        else:
            return "error"

    def __unicode__(self):
        return "Resource {0} - {1}".format(self.url_split.geturl(), self.status)
#-------------------------------------------------------------------------------
#
# Insteon IO linc 2450
#
import iofun
import message
from device import Device
from switch import Switch
from querier import Querier
from querier import MsgHandler
from threading import Timer
from linkdb import *
from device import LinkRecordAdder
from dbbuilder import GenericDBBuilder
from linkdb import LightDBRecordFormatter
from us.pfrommer.insteon.msg import Msg
from us.pfrommer.insteon.msg import MsgListener
from us.pfrommer.insteon.msg import InsteonAddress
def out(msg = ""):
iofun.out(msg)
def outchars(msg = ""):
iofun.outchars(msg)
class DefaultMsgHandler(MsgHandler):
    """Message handler that logs every received message under a fixed label."""

    label = None

    def __init__(self, l):
        self.label = l

    def processMsg(self, msg):
        # Render and log the raw message, prefixed with this handler's label.
        iofun.out("{0} got msg: {1}".format(self.label, msg.toString()))
        return 1
class IOLinc2450(Device):
    """============== Insteon I/O Linc 2450 ==============="""
    # Note: the docstrings in this class double as help text in the
    # interactive terminal, so they are kept verbatim.

    def __init__(self, name, addr):
        # Device.__init__ sets up the name/address and the link database
        # (self.db) plus the querier used for direct commands.
        Device.__init__(self, name, addr)
        # Build the all-link database with the generic builder and display
        # records using the light-device formatter.
        self.dbbuilder = GenericDBBuilder(addr, self.db)
        self.db.setRecordFormatter(LightDBRecordFormatter())

    def ping(self):
        """ping()
        pings device"""
        self.querier.setMsgHandler(DefaultMsgHandler("ping"))
        # Send a standard-direct message: command 0x0F is the Insteon ping.
        self.querier.querysd(0x0F, 0x01);
#
# somebody should figure out how this thing works ... why not YOU?
# | unknown | codeparrot/codeparrot-clean | ||
#!/usr/bin/python
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from math import acos, asin, atan2, cos, degrees, pi, radians, sin, sqrt
import numpy as np
from auttitude.io import (
sphere_line,
sphere_plane,
translate_attitude,
dcos_plane,
dcos_line,
format_attitude,
)
from auttitude.math import normalized_cross
from auttitude.stats import DEFAULT_GRID, SphericalStatistics
class Vector(np.ndarray):
    """
    Class that represents one normalized vector in space. This class
    extends Numpy.ndarray class that is used as storage container for
    the information.

    Parameters:
        dcos_data: Iterable object with 3 elements (can be even another
            Vector) to construct a Vector from. Inside 'io' module there
            are methods to convert attitude data to normalized direction
            cosines.
    """

    def __new__(cls, dcos_data):
        return np.asarray(dcos_data).view(cls)

    def angle_with(self, other, precise=False):
        """Returns the angle (in radians) between both vectors using
        the dot product between them. Optionally, if precise is set to True,
        calculates the angle using the arctangent of the ratio of the
        magnitude of the cross and dot products between the vectors.

        Parameters:
            other: A Vector like object.
            precise: whether to use arccosine or arctangent (defaults False)
        """
        if not precise:
            self_length = self.length
            other_length = sqrt(other.dot(other))
            cos_theta = self.dot(other) / (self_length * other_length)
            # Clamp into the valid acos domain: floating point round-off can
            # push |cos_theta| marginally above 1 for (anti)parallel vectors.
            return acos(max(-1.0, min(1.0, cos_theta)))
        else:
            # Bug fix: atan2 needs the *magnitude* of the cross product; the
            # previous code passed the cross product vector itself, which
            # raises TypeError inside math.atan2.
            return atan2(self.cross_with(other).length, self.dot(other))

    def cross_with(self, other):
        """Returns the cross product between both vectors.

        Parameter:
            other: A Vector like object.
        """
        return Vector(np.cross(self, other))

    def normalized_cross_with(self, other):
        """Returns the normalized cross product between this and other
        vectors.

        Parameter:
            other: A Vector like object.
        """
        return Vector(normalized_cross(self, other))

    @property
    def attitude(self):
        """Returns the spherical coordinates of the normalized vector,
        considering it to be a Line in geological sense, as a
        Trend/Plunge pair in degrees."""
        x, y, z = self / self.length
        if z > 0:
            # Mirror to the lower hemisphere so the plunge is positive.
            x, y = -x, -y
        return degrees(atan2(x, y)) % 360, degrees(asin(abs(z)))

    def format(
        self,
        strike=False,
        direction_format="quadrant",
        strike_format="right hand rule",
    ):
        """Returns the spherical coordinates of the normalized vector,
        considering it to be a Line in geological sense, as a
        Trend/Plunge pair formatted using the given parameters.

        Please refer to format_attitude method for description of parameters."""
        return format_attitude(
            *self.attitude,
            strike=strike,
            direction_format=direction_format,
            strike_format=strike_format
        )

    @property  # this should be cached
    def length(self):
        """Returns the euclidian norm of this vector."""
        return sqrt(self.dot(self))

    @property
    def direction_vector(self):
        """Returns the vector's left horizontal perpendicular vector.
        defaults to (1, 0, 0) if the vector is vertical."""
        if abs(self[2]) == 1.0:
            return Vector((1.0, 0.0, 0.0))
        direction = Vector((self[1], -self[0], 0.0))
        return direction / direction.length

    @property
    def dip_vector(self):
        """Returns the vector perpendicular to both this vector
        and it's direction vector. If this vector represents a plane,
        the resulting vector represents the maximum slope direction."""
        return Vector(np.cross(self / self.length, self.direction_vector))

    @property
    def projection_matrix(self):
        """Returns the matrix that projects vectors onto this vector."""
        return np.outer(self, self)

    @property
    def rejection_matrix(self):
        """Returns the matrix that rejects of a vector to this vector."""
        return np.eye(3) - self.projection_matrix

    @property
    def cross_product_matrix(self):
        """Returns the matrix that operates the cross product with this vector
        when multiplied by another vector"""
        return np.array(
            (
                (0.0, -self[2], self[1]),
                (self[2], 0.0, -self[0]),
                (-self[1], self[0], 0.0),
            )
        )

    def get_rotation_matrix(self, theta):
        """Returns the counterclockwise rotation matrix about this vector
        by angle theta (Rodrigues' rotation formula in matrix form).

        Parameters:
            theta: Rotation angle in radians
        """
        return (
            cos(theta) * np.eye(3)
            + sin(theta) * self.cross_product_matrix
            + (1 - cos(theta)) * self.projection_matrix
        )

    def get_great_circle(self, step=radians(1.0), offset=0.0):
        """Returns an array of n points equally spaced along the great circle
        normal to this vector.

        Parameters:
            step: Angular step in radians to generate points around great
                circle.
            offset: Angular offset in radians from direction to generate points.
        """
        theta_range = np.arange(offset, 2 * pi + offset, step) % (2 * pi)
        sin_range = np.sin(theta_range)
        cos_range = np.cos(theta_range)
        # Returned as a 1-tuple for compatibility with the plotting helpers.
        return (
            (
                self.direction_vector[:, None] * cos_range
                + self.dip_vector[:, None] * sin_range
            ).T,
        )

    def get_small_circle(self, alpha, A=0, B=0, step=radians(1.0), offset=0.0):
        """Returns a pair of arrays representing points spaced step along
        both small circles with an semi-apical opening of alpha around
        this vector.

        Parameters:
            alpha: Apperture of the small circle in radians
            A, B: optional second-harmonic modulation of the aperture.
            step: Angular step in radians to generate points around small
                circle.
            offset: Angular offset in radians from direction to generate points.
        """
        if A == 0 and B == 0:
            # Constant aperture: scale the great circle and lift along self.
            sc = self.get_great_circle(step, offset)[0].T * sin(alpha) + self[
                :, None
            ] * cos(alpha)
        else:
            # Aperture varies with position angle (elliptical small circle).
            theta_range = np.arange(0, 2 * pi, step)
            alpha_ = (
                alpha
                + A * np.cos(2 * theta_range)
                + B * np.sin(2 * theta_range)
            )
            sc = self.get_great_circle(step)[0].T * np.sin(alpha_) + self[
                :, None
            ] * np.cos(alpha_)
        return sc.T, -sc.T

    def arc_to(self, other, step=radians(1.0)):
        """Returns an array of points spaced step along the great circle
        between both vectors.

        Parameters:
            other: target Vector like object.
            step: Angular step in radians to generate points along the
                great-circle arc.
        """
        # Component of ``other`` orthogonal to self, normalized: together with
        # self it spans the plane of the arc.
        normal = self.rejection_matrix.dot(other)
        normal /= sqrt(normal.dot(normal))
        theta_range = np.arange(0, self.angle_with(other), step)
        sin_range = np.sin(theta_range)
        cos_range = np.cos(theta_range)
        return ((self * cos_range[:, None] + normal * sin_range[:, None]),)
class Plane(Vector):
    """
    Like the Vector class but, more specifically representing a plane in
    space defined by the direction cosines of the plane dip direction/dip
    pair.

    Parameters:
        dcos_data: Direction cosines of the dip direction/dip pair.
    """

    @staticmethod
    def from_attitude(direction, dip, strike=False):
        """
        Return a new Plane from direction, dip and strike given.

        Please refer to translate_attitude method for parameters description.
        """
        dip_direction, dip_angle = translate_attitude(direction, dip, strike)
        return Plane(dcos_plane((dip_direction, dip_angle)))

    def intersection_with(self, other):
        """Returns the line of intersection of this and the other plane.

        Parameter:
            other: a Plane like object that will intersect with this object.
        """
        intersection = Line(self.cross_with(other))
        norm = intersection.length
        if norm > 0:
            return intersection / norm
        return intersection

    @property
    def rhr_attitude(self):
        """Attitude as a right-hand-rule strike/dip pair, in degrees."""
        dip_direction, dip_angle = self.attitude
        return (dip_direction - 90) % 360, dip_angle

    @property
    def attitude(self):
        """Returns the spherical coordinates of the plane as a
        Dip Direction/Dip pair, in degrees."""
        x, y, z = self / self.length
        if z > 0:
            x, y = -x, -y
        dip_direction = degrees(atan2(-x, -y)) % 360
        dip_angle = degrees(acos(abs(z)))
        return dip_direction, dip_angle

    def format(
        self,
        strike=True,
        direction_format="azimuth",
        strike_format="dip quadrant",
    ):
        """Returns the spherical coordinates of the plane as a
        Direction/Dip pair formatted using the given parameters.

        Please refer to format_attitude method for description of parameters."""
        return format_attitude(
            *self.attitude,
            strike=strike,
            direction_format=direction_format,
            strike_format=strike_format
        )
class Line(Vector):
    """
    Like the Vector class but, more specifically representing a line in
    space defined by the direction cosines of the line direction/dip.

    Parameters:
        dcos_data: Direction cosines of the line direction/dip.
    """

    @staticmethod
    def from_attitude(direction, dip, strike=False):
        """
        Return a new Line Object from direction, dip and strike given.

        Please refer to translate_attitude method for description of parameters.
        """
        direction, dip = translate_attitude(direction, dip, strike)
        return Line(dcos_line((direction, dip)))

    def plane_with(self, other):
        """Returns the plane containing this and the other line.

        Parameter:
            other: a Line like object that will define the returned plane.
        """
        # Bug fix: the cross product method inherited from Vector is
        # ``cross_with`` (np.ndarray has no ``cross`` method); the previous
        # ``self.cross(other)`` raised AttributeError.
        plane = Plane(self.cross_with(other))
        plane_length = plane.length
        return plane / plane_length if plane_length > 0 else plane
class VectorSet(np.ndarray):
    """Class that represents a set (collection) of Vectors.

    Parameters:
        dcos_data: Is an array of direction cosines.
    """

    # Class used to wrap single (1, 3) items returned by __getitem__;
    # subclasses override this with Plane or Line.
    item_class = Vector

    def __new__(cls, dcos_data):
        obj = np.asarray(dcos_data).view(cls)
        return obj

    def __array_finalize__(self, obj):
        # Bug fix: numpy's subclassing hook is named ``__array_finalize__``;
        # it was previously misspelled ``__finalize_array__`` and therefore
        # never invoked by numpy. The hook is intentionally a no-op.
        if obj is None:
            return

    def __getitem__(self, x):
        # Return single-vector results as item_class instances so they
        # behave like Vector/Plane/Line; other slices pass through.
        item = super(VectorSet, self).__getitem__(x)
        if np.atleast_2d(item).shape == (1, 3):
            return item.view(self.item_class)
        else:
            return item

    @property
    def stats(self):
        """Contains spherical statistics object for the data
        set.
        """
        return SphericalStatistics(self)

    @property
    def attitude(self):
        """Converts this data from direction cosines to attitudes."""
        return sphere_line(self)

    def count_fisher(self, k=None, grid=None):
        """Performs grid counting of the data by Fisher smoothing.

        Parameters:
            k: von Mises-Fisher k parameter, see
                stats.SphericalGrid.count_fisher.
            grid: A stats.Spherical grid object to count on. If None
                the default grid defined on stats.DEFAULT_GRID will be
                used.
        """
        if grid is None:
            grid = DEFAULT_GRID
        return grid.count_fisher(self, k)

    def count_kamb(self, theta=None, grid=None):
        """Performs grid counting of the data by small circles of
        aperture theta.

        Parameters:
            theta: Robin and Jowett (1986) based on Kamb (1956) theta
                parameter, see stats.SphericalGrid.count_kamb.
            grid: A stats.Spherical grid object to count on. If None
                the default grid defined on stats.DEFAULT_GRID will be
                used.
        """
        if grid is None:
            grid = DEFAULT_GRID
        return grid.count_kamb(self, theta)

    def normalized_cross_with(self, other):
        """Returns a VectorSet object containing the normalized cross
        product of all possible pairs between this VectorSet and an
        (n, 3) array-like

        Parameter:
            other: A VectorSet like object.
        """
        vectors = np.zeros((len(self) * len(other), 3))
        i = 0
        for self_vector in self:
            for other_vector in other:
                cross = normalized_cross(self_vector, other_vector)
                # Keep results in the lower hemisphere (z <= 0).
                vectors[i] = cross if cross[2] < 0 else -cross
                i += 1
        return VectorSet(vectors)

    def angle_with(self, other, precise=False):
        """Returns the angles matrix between this Spherical Data and an
        (n, 3) array-like.

        Parameter:
            other: A VectorSet like object.
            precise: whether to use arccosine or arctangent (defaults False)
        """
        angles = np.zeros((len(self), len(other)))
        for i, self_vector in enumerate(self):
            for j, other_vector in enumerate(other):
                angles[i, j] = self_vector.angle_with(other_vector, precise)
        return angles

    def get_great_circle(self, step=radians(1.0)):
        """Returns a generator to the list of great circles of
        this VectorSet vectors.

        Parameters:
            step: Angular step in radians to generate points around great
                circle.
        """
        for vector in self:
            yield vector.get_great_circle(step)[0]  # because of plot_circles
class PlaneSet(VectorSet):
    """Class that represents a set (collection) of Planes.

    Parameters:
        dcos_data: Is an array of direction cosines.
    """

    item_class = Plane

    def intersection_with(self, other):
        """Returns the intersection of all combinations of
        planes in this set with the planes in other set as a
        list of lines defined as a VectorSet.

        Parameter:
            other: A PlaneSet like object.
        """
        crossed = self.normalized_cross_with(other)
        return crossed.view(LineSet)

    @property
    def attitude(self):
        """Converts this data from direction cosines to attitudes."""
        return sphere_plane(self)
class LineSet(VectorSet):
    """Class that represents a set (collection) of Lines.

    Parameters:
        dcos_data: Is an array of direction cosines.
    """

    item_class = Line

    def planes_with(self, other):
        """Return the list of Planes resulting from the
        intersection of the combination of all lines in
        this LineSet with other LineSet like object.

        Parameter:
            other: A LineSet like object.
        """
        crossed = self.normalized_cross_with(other)
        return crossed.view(PlaneSet)
"""
Social Facebook API
"""
# TODO
# There are still some performance and scalability issues that should be
# addressed for the various endpoints in this social_facebook djangoapp.
#
# For the Courses and Friends API:
# For both endpoints, we are retrieving the same data from the Facebook server.
# We are then simply organizing and filtering that data differently for each endpoint.
#
# Here are 3 ideas that can be explored further:
#
# Option 1. The app can just call one endpoint that provides a mapping between CourseIDs and Friends,
# and then cache that data once. The reverse map from Friends to CourseIDs can then be created on the app side.
#
# Option 2. The app once again calls just one endpoint (since the same data is computed for both),
# and caches the data once. The difference from #1 is that the server does the computation of the reverse-map and
# sends both maps down to the client. It's a tradeoff between bandwidth and client-side computation. So the payload
# could be something like:
#
# {
# courses: [
# {course_id: "c/ourse/1", friend_indices: [1, 2, 3]},
# {course_id: "c/ourse/2", friend_indices: [3, 4, 5]},
# ..
# ],
# friends: [
# {username: "friend1", facebook_id: "xxx", course_indices: [2, 7, 9]},
# {username: "friend2", facebook_id: "yyy", course_indices: [1, 4, 3]},
# ...
# ]
# }
#
# Option 3. Alternatively, continue to have separate endpoints, but have both endpoints call the same underlying method
# with a built-in cache.
#
# All 3 options can make use of a common cache of results from FB.
#
# At a minimum, some performance/load testing would need to be done
# so we have an idea of these endpoints' limitations and thresholds. | unknown | codeparrot/codeparrot-clean | ||
/**
* Copyright (c) Meta Platforms, Inc. and affiliates.
*
* This source code is licensed under the MIT license found in the
* LICENSE file in the root directory of this source tree.
*/
import {render, screen, fireEvent} from '@testing-library/react';
import * as React from 'react';
import {expectLogsAndClear, log} from './expectLogs';
// Component compiled by React Compiler ('use memo') that exercises every
// combination of prefix/postfix increment and decrement on a local variable.
// The inline expectations pin JavaScript's evaluation-order semantics:
// postfix yields the pre-update value, prefix yields the post-update value.
function Counter(props) {
  'use memo';
  let value = props.value;
  let a = value++;
  expect(a).toBe(props.value); // postfix
  let b = ++value;
  expect(b).toBe(props.value + 2); // previous postfix operation + prefix operation
  let c = ++value;
  expect(c).toBe(props.value + 3);
  let d = value--;
  expect(d).toBe(props.value + 3); // postfix returns the pre-decrement value
  let e = --value;
  expect(e).toBe(props.value + 1);
  let f = --value;
  expect(f).toBe(props.value);
  expect(value).toBe(props.value); // net effect of all updates is zero
  return <span>{value}</span>;
}
// Render twice with different props so the compiled Counter re-executes its
// update expressions (and the asserts inside it) on initial mount and update.
// NOTE(review): the name 'use-state' looks copied from another e2e test; no
// state hook is involved here — this verifies update-expression compilation.
test('use-state', async () => {
  const {asFragment, rerender} = render(<Counter value={0} />);
  expect(asFragment()).toMatchInlineSnapshot(`
    <DocumentFragment>
      <span>
        0
      </span>
    </DocumentFragment>
  `);
  rerender(<Counter value={1} />);
  expect(asFragment()).toMatchInlineSnapshot(`
    <DocumentFragment>
      <span>
        1
      </span>
    </DocumentFragment>
  `);
});
# -*- coding: utf-8 -*-
from __future__ import (unicode_literals, division, absolute_import, print_function)
# calibre store-plugin metadata; store_version is checked by calibre when
# store plugins are loaded dynamically.
store_version = 1  # Needed for dynamic plugin loading
__license__ = 'GPL 3'
__copyright__ = '2011, John Schember <john@nachtimwald.com>'
__docformat__ = 'restructuredtext en'
import urllib2
from contextlib import closing
from lxml import html
from PyQt4.Qt import QUrl
from calibre import browser
from calibre.gui2 import open_url
from calibre.gui2.store import StorePlugin
from calibre.gui2.store.basic_config import BasicStoreConfig
from calibre.gui2.store.search_result import SearchResult
from calibre.gui2.store.web_store_dialog import WebStoreDialog
class EBookNLStore(BasicStoreConfig, StorePlugin):
    # calibre store plugin for ebook.nl: opens the storefront and scrapes
    # search results from the store's advanced-search page.

    def open(self, parent=None, detail_item=None, external=False):
        # Opens either the store front page or, when detail_item (a store
        # product id) is given, the product's detail page — in the external
        # browser or in calibre's embedded store dialog.
        url = 'http://www.ebook.nl/'
        url_details = ('http://www.ebook.nl/store/{0}')
        if external or self.config.get('open_external', False):
            if detail_item:
                url = url_details.format(detail_item)
            open_url(QUrl(url))
        else:
            detail_url = None
            if detail_item:
                detail_url = url_details.format(detail_item)
            d = WebStoreDialog(self.gui, url, parent, detail_url)
            d.setWindowTitle(self.name)
            d.set_tags(self.config.get('tags', ''))
            d.exec_()

    def search(self, query, max_results=10, timeout=60):
        # Generator yielding up to max_results SearchResult objects scraped
        # from the ebook.nl search results table.
        url = ('http://www.ebook.nl/store/advanced_search_result.php?keywords='
            + urllib2.quote(query))
        br = browser()
        counter = max_results
        with closing(br.open(url, timeout=timeout)) as f:
            doc = html.fromstring(f.read())
            for data in doc.xpath('//table[contains(@class, "productListing")]/tr'):
                if counter <= 0:
                    break
                # Product link/image cell; rows without it are not products.
                details = data.xpath('./td/div[@class="prodImage"]/a')
                if not details:
                    continue
                details = details[0]
                # Product id: last path segment of the href, query string
                # stripped. (Shadows the builtin ``id``; kept as-is.)
                id = ''.join(details.xpath('./@href')).strip()
                id = id[id.rfind('/')+1:]
                i = id.rfind('?')
                if i > 0:
                    id = id[:i]
                if not id:
                    continue
                cover_url = 'http://www.ebook.nl/store/' + ''.join(details.xpath('./img/@src'))
                title = ''.join(details.xpath('./img/@title')).strip()
                author = ''.join(data.xpath('./td/div[@class="prodTitle"]/h3/a/text()')).strip()
                price = ''.join(data.xpath('./td/div[@class="prodTitle"]/b/text()'))
                # Format/DRM flags from the product description text.
                pdf = data.xpath('boolean(./td/div[@class="prodTitle"]/'
                    'p[contains(text(), "Bestandsformaat: Pdf")])')
                epub = data.xpath('boolean(./td/div[@class="prodTitle"]/'
                    'p[contains(text(), "Bestandsformaat: ePub")])')
                nodrm = data.xpath('boolean(./td/div[@class="prodTitle"]/'
                    'p[contains(text(), "zonder DRM") or'
                    ' contains(text(), "watermerk")])')
                counter -= 1

                s = SearchResult()
                s.cover_url = cover_url
                s.title = title.strip()
                s.author = author.strip()
                s.price = price
                if nodrm:
                    s.drm = SearchResult.DRM_UNLOCKED
                else:
                    s.drm = SearchResult.DRM_LOCKED
                s.detail_item = id
                formats = []
                if epub:
                    formats.append('ePub')
                if pdf:
                    formats.append('PDF')
                s.formats = ','.join(formats)
                yield s
from datetime import (
datetime,
timedelta,
)
import numpy as np
import pytest
from pandas.core.dtypes.cast import (
maybe_box_native,
maybe_unbox_numpy_scalar,
)
from pandas import (
Interval,
Period,
Timedelta,
Timestamp,
)
@pytest.mark.parametrize(
    "obj,expected_dtype",
    [
        (b"\x00\x10", bytes),
        (4, int),
        (np.uint(4), int),
        (np.int32(-4), int),
        (np.uint8(4), int),
        (float(454.98), float),
        (np.float16(0.4), float),
        (np.float64(1.4), float),
        (np.bool_(False), bool),
        (datetime(2005, 2, 25), datetime),
        (np.datetime64("2005-02-25"), Timestamp),
        (Timestamp("2005-02-25"), Timestamp),
        (np.timedelta64(1, "D"), Timedelta),
        (Timedelta(1, "D"), Timedelta),
        (Interval(0, 1), Interval),
        (Period("4Q2005"), Period),
    ],
)
def test_maybe_box_native(obj, expected_dtype):
    # maybe_box_native should convert numpy scalars to the equivalent native
    # Python (or pandas) scalar type and leave already-native values alone.
    boxed_obj = maybe_box_native(obj)
    result_dtype = type(boxed_obj)
    assert result_dtype is expected_dtype
@pytest.mark.parametrize("typecode", np.typecodes["All"])
def test_maybe_unbox_numpy_scalar(typecode, using_python_scalars):
    # https://github.com/pandas-dev/pandas/pull/63016
    # For every numpy typecode, pick a sample value plus the native type
    # expected after unboxing. ``using_python_scalars`` is presumably a
    # fixture toggling the relevant pandas option — verify in conftest.
    if typecode == "?":
        scalar = False
        expected = bool
    elif typecode in "bhilqnpBHILQNP":
        # All signed and unsigned integer typecodes unbox to int.
        scalar = 0
        expected = int
    elif typecode in "efdg":
        # float16/float32/float64/longdouble unbox to float.
        scalar = 0.0
        expected = float
    elif typecode in "FDG":
        scalar = 0.0 + 0.0j
        expected = complex
    elif typecode in "SV":
        # bytes and void typecodes.
        scalar = b""
        expected = bytes
    elif typecode == "U":
        scalar = ""
        expected = str
    elif typecode == "O":
        scalar = 0
        expected = int
    elif typecode == "M":
        scalar = datetime(2025, 1, 1)
        expected = Timestamp
    elif typecode == "m":
        scalar = timedelta(seconds=3)
        expected = Timedelta
    else:
        raise ValueError(f"typecode {typecode} not recognized")
    # Round-trip through an ndarray so ``value`` is a true numpy scalar.
    value = np.array([scalar], dtype=typecode)[0]
    result = maybe_unbox_numpy_scalar(value)
    if using_python_scalars:
        # Option enabled: an exact Python-native type is expected.
        assert type(result) == expected
    else:
        # Option disabled: the numpy scalar passes through unchanged.
        assert result is value
def test_maybe_unbox_numpy_scalar_timestamp(unit, using_python_scalars):
    # https://github.com/pandas-dev/pandas/pull/63016
    # ``unit`` is presumably a fixture supplying a datetime64 resolution
    # (e.g. "s"/"ms"/"us"/"ns") — verify in conftest.
    value = np.datetime64(1, unit)
    expected = Timestamp(1, unit=unit) if using_python_scalars else value
    result = maybe_unbox_numpy_scalar(value)
    assert result == expected
    assert type(result) == type(expected)
def test_maybe_unbox_numpy_scalar_datetime(unit, using_python_scalars):
# https://github.com/pandas-dev/pandas/pull/63016
value = np.timedelta64(1, unit)
expected = Timedelta(1, unit=unit) if using_python_scalars else value
result = maybe_unbox_numpy_scalar(value)
assert result == expected
assert type(result) == type(expected) | python | github | https://github.com/pandas-dev/pandas | pandas/tests/dtypes/cast/test_box_unbox.py |
/*-------------------------------------------------------------------------
*
* postgres.h
* Primary include file for PostgreSQL server .c files
*
* This should be the first file included by PostgreSQL backend modules.
* Client-side code should include postgres_fe.h instead.
*
*
* Portions Copyright (c) 1996-2026, PostgreSQL Global Development Group
* Portions Copyright (c) 1995, Regents of the University of California
*
* src/include/postgres.h
*
*-------------------------------------------------------------------------
*/
/* IWYU pragma: always_keep */
/*
*----------------------------------------------------------------
* TABLE OF CONTENTS
*
* When adding stuff to this file, please try to put stuff
* into the relevant section, or add new sections as appropriate.
*
* section description
* ------- ------------------------------------------------
* 1) Datum type + support functions
* 2) miscellaneous
*
* NOTES
*
* In general, this file should contain declarations that are widely needed
* in the backend environment, but are of no interest outside the backend.
*
* Simple type definitions live in c.h, where they are shared with
* postgres_fe.h. We do that since those type definitions are needed by
* frontend modules that want to deal with binary data transmission to or
* from the backend. Type definitions in this file should be for
* representations that never escape the backend, such as Datum.
*
*----------------------------------------------------------------
*/
#ifndef POSTGRES_H
#define POSTGRES_H
/* IWYU pragma: begin_exports */
#include "c.h"
#include "utils/elog.h"
#include "utils/palloc.h"
/* IWYU pragma: end_exports */
/* ----------------------------------------------------------------
* Section 1: Datum type + support functions
* ----------------------------------------------------------------
*/
/*
* A Datum contains either a value of a pass-by-value type or a pointer to a
* value of a pass-by-reference type. Therefore, we must have
* sizeof(Datum) >= sizeof(void *). No current or foreseeable Postgres
* platform has pointers wider than 8 bytes, and standardizing on Datum being
* exactly 8 bytes has advantages in reducing cross-platform differences.
*
* The functions below and the analogous functions for other types should be used to
* convert between a Datum and the appropriate C type.
*/
typedef uint64_t Datum;
/*
* This symbol is now vestigial, but we continue to define it so as not to
* unnecessarily break extension code.
*/
#define SIZEOF_DATUM 8
/*
 * A NullableDatum is used in places where both a Datum and its nullness needs
 * to be stored. This can be more efficient than storing datums and nullness
 * in separate arrays, due to better spatial locality, even if more space may
 * be wasted due to padding.
 */
typedef struct NullableDatum
{
	/*
	 * The FIELDNO_* constants mirror the member declaration order so code
	 * that addresses these fields by position (NOTE(review): presumably the
	 * JIT tuple-deforming code -- confirm) stays in sync; keep them updated
	 * if members are reordered.
	 */
#define FIELDNO_NULLABLE_DATUM_DATUM 0
	Datum		value;
#define FIELDNO_NULLABLE_DATUM_ISNULL 1
	bool		isnull;
	/* due to alignment padding this could be used for flags for free */
} NullableDatum;
/*
 * DatumGetBool
 *		Extract the boolean stored in a datum.  Any nonzero bit pattern
 *		is treated as true.
 */
static inline bool
DatumGetBool(Datum X)
{
	return X ? true : false;
}
/*
 * BoolGetDatum
 *		Pack a boolean into a datum, normalizing to 0 or 1.
 */
static inline Datum
BoolGetDatum(bool X)
{
	return X ? (Datum) 1 : (Datum) 0;
}
/*
 * The conversions below are plain integral casts: DatumGetXXX() keeps only
 * the low-order bits of the datum, while XXXGetDatum() widens the value
 * into the 64-bit Datum (sign-extending when the source type is signed).
 */
/*
 * DatumGetChar
 * Returns character value of a datum.
 */
static inline char
DatumGetChar(Datum X)
{
	return (char) X;
}
/*
 * CharGetDatum
 * Returns datum representation for a character.
 */
static inline Datum
CharGetDatum(char X)
{
	return (Datum) X;
}
/*
 * Int8GetDatum
 * Returns datum representation for an 8-bit integer.
 *
 * Note: a negative value sign-extends into the datum's upper bits.
 */
static inline Datum
Int8GetDatum(int8 X)
{
	return (Datum) X;
}
/*
 * DatumGetUInt8
 * Returns 8-bit unsigned integer value of a datum.
 */
static inline uint8
DatumGetUInt8(Datum X)
{
	return (uint8) X;
}
/*
 * UInt8GetDatum
 * Returns datum representation for an 8-bit unsigned integer.
 */
static inline Datum
UInt8GetDatum(uint8 X)
{
	return (Datum) X;
}
/*
 * DatumGetInt16
 * Returns 16-bit integer value of a datum.
 */
static inline int16
DatumGetInt16(Datum X)
{
	return (int16) X;
}
/*
 * Int16GetDatum
 * Returns datum representation for a 16-bit integer.
 */
static inline Datum
Int16GetDatum(int16 X)
{
	return (Datum) X;
}
/*
 * DatumGetUInt16
 * Returns 16-bit unsigned integer value of a datum.
 */
static inline uint16
DatumGetUInt16(Datum X)
{
	return (uint16) X;
}
/*
 * UInt16GetDatum
 * Returns datum representation for a 16-bit unsigned integer.
 */
static inline Datum
UInt16GetDatum(uint16 X)
{
	return (Datum) X;
}
/*
 * DatumGetInt32
 * Returns 32-bit integer value of a datum.
 */
static inline int32
DatumGetInt32(Datum X)
{
	return (int32) X;
}
/*
 * Int32GetDatum
 * Returns datum representation for a 32-bit integer.
 */
static inline Datum
Int32GetDatum(int32 X)
{
	return (Datum) X;
}
/*
 * DatumGetUInt32
 * Returns 32-bit unsigned integer value of a datum.
 */
static inline uint32
DatumGetUInt32(Datum X)
{
	return (uint32) X;
}
/*
 * UInt32GetDatum
 * Returns datum representation for a 32-bit unsigned integer.
 */
static inline Datum
UInt32GetDatum(uint32 X)
{
	return (Datum) X;
}
/*
 * DatumGetObjectId
 * Returns object identifier value of a datum.
 */
static inline Oid
DatumGetObjectId(Datum X)
{
	return (Oid) X;
}
/*
 * ObjectIdGetDatum
 * Returns datum representation for an object identifier.
 */
static inline Datum
ObjectIdGetDatum(Oid X)
{
	return (Datum) X;
}
/*
 * DatumGetObjectId8
 * Returns 8-byte object identifier value of a datum.
 */
static inline Oid8
DatumGetObjectId8(Datum X)
{
	return (Oid8) X;
}
/*
 * ObjectId8GetDatum
 * Returns datum representation for an 8-byte object identifier
 */
static inline Datum
ObjectId8GetDatum(Oid8 X)
{
	return (Datum) X;
}
/*
 * DatumGetTransactionId
 * Returns transaction identifier value of a datum.
 */
static inline TransactionId
DatumGetTransactionId(Datum X)
{
	return (TransactionId) X;
}
/*
 * TransactionIdGetDatum
 * Returns datum representation for a transaction identifier.
 */
static inline Datum
TransactionIdGetDatum(TransactionId X)
{
	return (Datum) X;
}
/*
 * MultiXactIdGetDatum
 * Returns datum representation for a multixact identifier.
 */
static inline Datum
MultiXactIdGetDatum(MultiXactId X)
{
	return (Datum) X;
}
/*
 * DatumGetCommandId
 * Returns command identifier value of a datum.
 */
static inline CommandId
DatumGetCommandId(Datum X)
{
	return (CommandId) X;
}
/*
 * CommandIdGetDatum
 * Returns datum representation for a command identifier.
 */
static inline Datum
CommandIdGetDatum(CommandId X)
{
	return (Datum) X;
}
/*
 * DatumGetPointer
 * Returns pointer value of a datum.
 */
static inline Pointer
DatumGetPointer(Datum X)
{
	/* go through uintptr_t so the integer-to-pointer conversion is clean */
	return (Pointer) (uintptr_t) X;
}
/*
 * PointerGetDatum
 * Returns datum representation for a pointer.
 *
 * Note: any const qualifier is deliberately dropped; Datum carries no
 * notion of constness.
 */
static inline Datum
PointerGetDatum(const void *X)
{
	return (Datum) (uintptr_t) X;
}
/*
 * DatumGetCString
 * Returns C string (null-terminated string) value of a datum.
 *
 * Note: C string is not a full-fledged Postgres type at present,
 * but type input functions use this conversion for their inputs.
 */
static inline char *
DatumGetCString(Datum X)
{
	return (char *) DatumGetPointer(X);
}
/*
 * CStringGetDatum
 * Returns datum representation for a C string (null-terminated string).
 *
 * Note: C string is not a full-fledged Postgres type at present,
 * but type output functions use this conversion for their outputs.
 * Note: CString is pass-by-reference; caller must ensure the pointed-to
 * value has adequate lifetime.
 */
static inline Datum
CStringGetDatum(const char *X)
{
	return PointerGetDatum(X);
}
/*
 * DatumGetName
 * Returns name value of a datum.
 */
static inline Name
DatumGetName(Datum X)
{
	return (Name) DatumGetPointer(X);
}
/*
 * NameGetDatum
 * Returns datum representation for a name.
 *
 * Note: Name is pass-by-reference; caller must ensure the pointed-to
 * value has adequate lifetime.
 */
static inline Datum
NameGetDatum(const NameData *X)
{
	return CStringGetDatum(NameStr(*X));
}
/*
 * DatumGetInt64
 * Returns 64-bit integer value of a datum.
 */
static inline int64
DatumGetInt64(Datum X)
{
	return (int64) X;
}
/*
 * Int64GetDatum
 * Returns datum representation for a 64-bit integer.
 */
static inline Datum
Int64GetDatum(int64 X)
{
	return (Datum) X;
}
/*
 * DatumGetUInt64
 * Returns 64-bit unsigned integer value of a datum.
 */
static inline uint64
DatumGetUInt64(Datum X)
{
	return (uint64) X;
}
/*
 * UInt64GetDatum
 * Returns datum representation for a 64-bit unsigned integer.
 */
static inline Datum
UInt64GetDatum(uint64 X)
{
	return (Datum) X;
}
/*
* Float <-> Datum conversions
*
* These have to be implemented as inline functions rather than macros, when
* passing by value, because many machines pass int and float function
* parameters/results differently; so we need to play weird games with unions.
*/
/*
 * DatumGetFloat4
 *		Reinterpret the low 32 bits of a datum as a float4 (bitwise,
 *		via a union -- no numeric conversion takes place).
 */
static inline float4
DatumGetFloat4(Datum X)
{
	union
	{
		int32		ival;
		float4		fval;
	}			u = {.ival = DatumGetInt32(X)};

	return u.fval;
}
/*
 * Float4GetDatum
 *		Store a float4's bit pattern in the low 32 bits of a datum
 *		(bitwise, via a union -- no numeric conversion takes place).
 */
static inline Datum
Float4GetDatum(float4 X)
{
	union
	{
		float4		fval;
		int32		ival;
	}			u = {.fval = X};

	return Int32GetDatum(u.ival);
}
/*
 * DatumGetFloat8
 *		Reinterpret a datum's 64 bits as a float8 (bitwise, via a
 *		union -- no numeric conversion takes place).
 */
static inline float8
DatumGetFloat8(Datum X)
{
	union
	{
		int64		ival;
		float8		fval;
	}			u = {.ival = DatumGetInt64(X)};

	return u.fval;
}
/*
 * Float8GetDatum
 *		Store a float8's bit pattern in a datum (bitwise, via a union --
 *		no numeric conversion takes place).
 */
static inline Datum
Float8GetDatum(float8 X)
{
	union
	{
		float8		fval;
		int64		ival;
	}			u = {.fval = X};

	return Int64GetDatum(u.ival);
}
/*
* Int64GetDatumFast
* Float8GetDatumFast
*
* These macros were intended to allow writing code that does not depend on
* whether int64 and float8 are pass-by-reference types, while not
* sacrificing performance when they are. They are no longer different
* from the regular functions, though we keep the assertions to protect
* code that might get back-patched into older branches.
*/
#define Int64GetDatumFast(X) \
(StaticAssertVariableIsOfTypeMacro(X, int64), Int64GetDatum(X))
#define Float8GetDatumFast(X) \
(StaticAssertVariableIsOfTypeMacro(X, double), Float8GetDatum(X))
/* ----------------------------------------------------------------
* Section 2: miscellaneous
* ----------------------------------------------------------------
*/
/*
 * pg_ternary
 * Boolean value with an extra "unset" value
 *
 * This enum can be used for values that want to distinguish between true,
 * false, and unset.
 */
typedef enum pg_ternary
{
	/* FALSE/TRUE intentionally match the numeric values of C's bool */
	PG_TERNARY_FALSE = 0,
	PG_TERNARY_TRUE = 1,
	PG_TERNARY_UNSET = -1
} pg_ternary;
/*
 * NON_EXEC_STATIC: It's sometimes useful to define a variable or function
 * that is normally static but extern when using EXEC_BACKEND (see
 * pg_config_manual.h). There would then typically be some code in
 * postmaster.c that uses those extern symbols to transfer state between
 * processes or do whatever other things it needs to do in EXEC_BACKEND mode.
 */
#ifdef EXEC_BACKEND
#define NON_EXEC_STATIC
#else
#define NON_EXEC_STATIC static
#endif
#endif /* POSTGRES_H */ | c | github | https://github.com/postgres/postgres | src/include/postgres.h |
from django.core.management.base import BaseCommand
def module_to_dict(module, omittable=lambda k: k.startswith('_')):
    """Convert a module namespace to a dict of ``name -> repr(value)``.

    Names for which ``omittable(name)`` is true are skipped; by default that
    omits all underscore-prefixed names (dunders, privates).
    """
    # Dict comprehension instead of dict(genexp): same behavior, idiomatic form.
    return {k: repr(v) for k, v in module.__dict__.items() if not omittable(k)}
class Command(BaseCommand):
help = """Displays differences between the current settings.py and Django's
default settings. Settings that don't appear in the defaults are
followed by "###"."""
requires_system_checks = False
    def add_arguments(self, parser):
        # --all additionally lists settings that still equal Django's
        # defaults; handle() prefixes those defaulted entries with "###".
        parser.add_argument('--all', action='store_true', dest='all', default=False,
            help='Display all settings, regardless of their value. '
            'Default values are prefixed by "###".')
def handle(self, **options):
# Inspired by Postfix's "postconf -n".
from django.conf import settings, global_settings
# Because settings are imported lazily, we need to explicitly load them.
settings._setup()
user_settings = module_to_dict(settings._wrapped)
default_settings = module_to_dict(global_settings)
output = []
for key in sorted(user_settings):
if key not in default_settings:
output.append("%s = %s ###" % (key, user_settings[key]))
elif user_settings[key] != default_settings[key]:
output.append("%s = %s" % (key, user_settings[key]))
elif options['all']:
output.append("### %s = %s" % (key, user_settings[key]))
return '\n'.join(output) | unknown | codeparrot/codeparrot-clean | ||
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* CMAC: Cipher Block Mode for Authentication
*
* Copyright © 2013 Jussi Kivilinna <jussi.kivilinna@iki.fi>
*
* Based on work by:
* Copyright © 2013 Tom St Denis <tstdenis@elliptictech.com>
* Based on crypto/xcbc.c:
* Copyright © 2006 USAGI/WIDE Project,
* Author: Kazunori Miyazawa <miyazawa@linux-ipv6.org>
*/
#include <crypto/internal/cipher.h>
#include <crypto/internal/hash.h>
#include <crypto/utils.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/string.h>
/*
 * +------------------------
 * | <parent tfm>
 * +------------------------
 * | cmac_tfm_ctx
 * +------------------------
 * | consts (block size * 2)
 * +------------------------
 */
struct cmac_tfm_ctx {
	struct crypto_cipher *child;	/* underlying block cipher handle */
	/*
	 * Flexible array holding the two CMAC subkeys (K1 then K2 of NIST
	 * SP 800-38B), one cipher block each; filled in by setkey().
	 */
	__be64 consts[];
};
/*
 * Install the cipher key and derive the CMAC subkeys.
 *
 * Per NIST SP 800-38B, K1 and K2 are obtained by encrypting the all-zero
 * block and doubling the result once and twice in GF(2^blocksize).
 */
static int crypto_cmac_digest_setkey(struct crypto_shash *parent,
				     const u8 *inkey, unsigned int keylen)
{
	struct cmac_tfm_ctx *ctx = crypto_shash_ctx(parent);
	unsigned int bs = crypto_shash_blocksize(parent);
	__be64 *consts = ctx->consts;
	u64 _const[2];
	int i, err = 0;
	u8 msb_mask, gfmask;
	err = crypto_cipher_setkey(ctx->child, inkey, keylen);
	if (err)
		return err;
	/* encrypt the zero block */
	memset(consts, 0, bs);
	crypto_cipher_encrypt_one(ctx->child, (u8 *)consts, (u8 *)consts);
	switch (bs) {
	case 16:
		/* 0x87 encodes the GF(2^128) reduction polynomial */
		gfmask = 0x87;
		_const[0] = be64_to_cpu(consts[1]);
		_const[1] = be64_to_cpu(consts[0]);
		/* gf(2^128) multiply zero-ciphertext with u and u^2 */
		for (i = 0; i < 4; i += 2) {
			/* arithmetic >> 63 replicates the MSB; fold it back in */
			msb_mask = ((s64)_const[1] >> 63) & gfmask;
			_const[1] = (_const[1] << 1) | (_const[0] >> 63);
			_const[0] = (_const[0] << 1) ^ msb_mask;
			consts[i + 0] = cpu_to_be64(_const[1]);
			consts[i + 1] = cpu_to_be64(_const[0]);
		}
		break;
	case 8:
		/* 0x1B encodes the GF(2^64) reduction polynomial */
		gfmask = 0x1B;
		_const[0] = be64_to_cpu(consts[0]);
		/* gf(2^64) multiply zero-ciphertext with u and u^2 */
		for (i = 0; i < 2; i++) {
			msb_mask = ((s64)_const[0] >> 63) & gfmask;
			_const[0] = (_const[0] << 1) ^ msb_mask;
			consts[i] = cpu_to_be64(_const[0]);
		}
		break;
	}
	return 0;
}
/* Start a new CMAC computation: the running state is the all-zero block. */
static int crypto_cmac_digest_init(struct shash_desc *pdesc)
{
	u8 *state = shash_desc_ctx(pdesc);

	memset(state, 0, crypto_shash_blocksize(pdesc->tfm));
	return 0;
}
/*
 * Absorb full blocks: classic CBC-MAC core, XOR each input block into the
 * running state and encrypt it in place.  Returns the number of trailing
 * bytes (< one block) left unprocessed for the core to buffer.
 *
 * NOTE: the do/while assumes len >= one block on entry; this instance is
 * registered with CRYPTO_AHASH_ALG_BLOCK_ONLY (see cmac_create()), which
 * presumably guarantees that -- confirm against the shash core contract.
 */
static int crypto_cmac_digest_update(struct shash_desc *pdesc, const u8 *p,
				     unsigned int len)
{
	struct crypto_shash *parent = pdesc->tfm;
	struct cmac_tfm_ctx *tctx = crypto_shash_ctx(parent);
	struct crypto_cipher *tfm = tctx->child;
	int bs = crypto_shash_blocksize(parent);
	u8 *prev = shash_desc_ctx(pdesc);
	do {
		crypto_xor(prev, p, bs);
		crypto_cipher_encrypt_one(tfm, prev, prev);
		p += bs;
		len -= bs;
	} while (len >= bs);
	return len;
}
/*
 * Process the final (possibly partial) block and emit the MAC.
 *
 * A full final block is XORed with subkey K1 (start of consts); a partial
 * one gets the 10* padding bit and is XORed with K2 (one block further in),
 * as the CMAC specification requires.
 */
static int crypto_cmac_digest_finup(struct shash_desc *pdesc, const u8 *src,
				    unsigned int len, u8 *out)
{
	struct crypto_shash *parent = pdesc->tfm;
	struct cmac_tfm_ctx *tctx = crypto_shash_ctx(parent);
	struct crypto_cipher *tfm = tctx->child;
	int bs = crypto_shash_blocksize(parent);
	u8 *prev = shash_desc_ctx(pdesc);
	unsigned int offset = 0;
	crypto_xor(prev, src, len);
	if (len != bs) {
		/* set the padding bit just past the message (0x80 = 1000 0000b) */
		prev[len] ^= 0x80;
		/* select K2 instead of K1 */
		offset += bs;
	}
	crypto_xor(prev, (const u8 *)tctx->consts + offset, bs);
	crypto_cipher_encrypt_one(tfm, out, prev);
	return 0;
}
/* Acquire the underlying block cipher for a new tfm instance. */
static int cmac_init_tfm(struct crypto_shash *tfm)
{
	struct cmac_tfm_ctx *ctx = crypto_shash_ctx(tfm);
	struct crypto_cipher_spawn *spawn =
		shash_instance_ctx(shash_alg_instance(tfm));
	struct crypto_cipher *child = crypto_spawn_cipher(spawn);

	if (IS_ERR(child))
		return PTR_ERR(child);
	ctx->child = child;
	return 0;
}
/* Duplicate a tfm by cloning the source tfm's underlying cipher. */
static int cmac_clone_tfm(struct crypto_shash *tfm, struct crypto_shash *otfm)
{
	struct cmac_tfm_ctx *old_ctx = crypto_shash_ctx(otfm);
	struct cmac_tfm_ctx *new_ctx = crypto_shash_ctx(tfm);
	struct crypto_cipher *child = crypto_clone_cipher(old_ctx->child);

	if (IS_ERR(child))
		return PTR_ERR(child);
	new_ctx->child = child;
	return 0;
}
/* Release the cipher acquired in cmac_init_tfm()/cmac_clone_tfm(). */
static void cmac_exit_tfm(struct crypto_shash *tfm)
{
	struct cmac_tfm_ctx *ctx = crypto_shash_ctx(tfm);
	crypto_free_cipher(ctx->child);
}
/*
 * Template instantiation: build a "cmac(<cipher>)" shash instance.
 *
 * Only 8- and 16-byte block ciphers are accepted, matching the two GF
 * reduction polynomials handled in crypto_cmac_digest_setkey().
 */
static int cmac_create(struct crypto_template *tmpl, struct rtattr **tb)
{
	struct shash_instance *inst;
	struct crypto_cipher_spawn *spawn;
	struct crypto_alg *alg;
	u32 mask;
	int err;
	err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_SHASH, &mask);
	if (err)
		return err;
	inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL);
	if (!inst)
		return -ENOMEM;
	spawn = shash_instance_ctx(inst);
	err = crypto_grab_cipher(spawn, shash_crypto_instance(inst),
				 crypto_attr_alg_name(tb[1]), 0, mask);
	if (err)
		goto err_free_inst;
	alg = crypto_spawn_cipher_alg(spawn);
	switch (alg->cra_blocksize) {
	case 16:
	case 8:
		break;
	default:
		err = -EINVAL;
		goto err_free_inst;
	}
	err = crypto_inst_setname(shash_crypto_instance(inst), tmpl->name, alg);
	if (err)
		goto err_free_inst;
	inst->alg.base.cra_priority = alg->cra_priority;
	inst->alg.base.cra_blocksize = alg->cra_blocksize;
	/* context holds the child cipher pointer plus two blocks of subkeys */
	inst->alg.base.cra_ctxsize = sizeof(struct cmac_tfm_ctx) +
				     alg->cra_blocksize * 2;
	inst->alg.base.cra_flags = CRYPTO_AHASH_ALG_BLOCK_ONLY |
				   CRYPTO_AHASH_ALG_FINAL_NONZERO;
	inst->alg.digestsize = alg->cra_blocksize;
	inst->alg.descsize = alg->cra_blocksize;
	inst->alg.init = crypto_cmac_digest_init;
	inst->alg.update = crypto_cmac_digest_update;
	inst->alg.finup = crypto_cmac_digest_finup;
	inst->alg.setkey = crypto_cmac_digest_setkey;
	inst->alg.init_tfm = cmac_init_tfm;
	inst->alg.clone_tfm = cmac_clone_tfm;
	inst->alg.exit_tfm = cmac_exit_tfm;
	inst->free = shash_free_singlespawn_instance;
	err = shash_register_instance(tmpl, inst);
	if (err) {
		/* label sits inside the if so earlier gotos share this cleanup */
err_free_inst:
		shash_free_singlespawn_instance(inst);
	}
	return err;
}
/* The "cmac" template; instantiated per underlying cipher, e.g. "cmac(aes)". */
static struct crypto_template crypto_cmac_tmpl = {
	.name = "cmac",
	.create = cmac_create,
	.module = THIS_MODULE,
};
/* Register/unregister the template on module load/unload. */
static int __init crypto_cmac_module_init(void)
{
	return crypto_register_template(&crypto_cmac_tmpl);
}
static void __exit crypto_cmac_module_exit(void)
{
	crypto_unregister_template(&crypto_cmac_tmpl);
}
module_init(crypto_cmac_module_init);
module_exit(crypto_cmac_module_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("CMAC keyed hash algorithm");
MODULE_ALIAS_CRYPTO("cmac");
MODULE_IMPORT_NS("CRYPTO_INTERNAL"); | c | github | https://github.com/torvalds/linux | crypto/cmac.c |
"""Retriever that generates and executes structured queries over its own data source."""
import logging
from collections.abc import Sequence
from typing import Any
from langchain_core.callbacks.manager import (
AsyncCallbackManagerForRetrieverRun,
CallbackManagerForRetrieverRun,
)
from langchain_core.documents import Document
from langchain_core.language_models import BaseLanguageModel
from langchain_core.retrievers import BaseRetriever
from langchain_core.runnables import Runnable
from langchain_core.structured_query import StructuredQuery, Visitor
from langchain_core.vectorstores import VectorStore
from pydantic import ConfigDict, Field, model_validator
from typing_extensions import override
from langchain_classic.chains.query_constructor.base import (
load_query_constructor_runnable,
)
from langchain_classic.chains.query_constructor.schema import AttributeInfo
logger = logging.getLogger(__name__)
QUERY_CONSTRUCTOR_RUN_NAME = "query_constructor"
def _get_builtin_translator(vectorstore: VectorStore) -> Visitor:
    """Get the translator class corresponding to the vector store class.

    Args:
        vectorstore: The vector store instance to find a query translator for.

    Returns:
        An instantiated ``Visitor`` able to translate a structured query into
        this vector store's native search parameters.

    Raises:
        ImportError: If ``langchain-community`` is not installed.
        ValueError: If no translator is known for this vector store's type.
    """
    # The legacy stores and all their translators live in langchain-community,
    # so that package is a hard requirement for this feature.
    try:
        import langchain_community  # noqa: F401
    except ImportError as err:
        msg = (
            "The langchain-community package must be installed to use this feature."
            " Please install it using `pip install langchain-community`."
        )
        raise ImportError(msg) from err
    from langchain_community.query_constructors.astradb import AstraDBTranslator
    from langchain_community.query_constructors.chroma import ChromaTranslator
    from langchain_community.query_constructors.dashvector import DashvectorTranslator
    from langchain_community.query_constructors.databricks_vector_search import (
        DatabricksVectorSearchTranslator,
    )
    from langchain_community.query_constructors.deeplake import DeepLakeTranslator
    from langchain_community.query_constructors.dingo import DingoDBTranslator
    from langchain_community.query_constructors.elasticsearch import (
        ElasticsearchTranslator,
    )
    from langchain_community.query_constructors.milvus import MilvusTranslator
    from langchain_community.query_constructors.mongodb_atlas import (
        MongoDBAtlasTranslator,
    )
    from langchain_community.query_constructors.myscale import MyScaleTranslator
    from langchain_community.query_constructors.neo4j import Neo4jTranslator
    from langchain_community.query_constructors.opensearch import OpenSearchTranslator
    from langchain_community.query_constructors.pgvector import PGVectorTranslator
    from langchain_community.query_constructors.pinecone import PineconeTranslator
    from langchain_community.query_constructors.qdrant import QdrantTranslator
    from langchain_community.query_constructors.redis import RedisTranslator
    from langchain_community.query_constructors.supabase import SupabaseVectorTranslator
    from langchain_community.query_constructors.tencentvectordb import (
        TencentVectorDBTranslator,
    )
    from langchain_community.query_constructors.timescalevector import (
        TimescaleVectorTranslator,
    )
    from langchain_community.query_constructors.vectara import VectaraTranslator
    from langchain_community.query_constructors.weaviate import WeaviateTranslator
    from langchain_community.vectorstores import (
        AstraDB,
        DashVector,
        DatabricksVectorSearch,
        DeepLake,
        Dingo,
        Milvus,
        MyScale,
        Neo4jVector,
        OpenSearchVectorSearch,
        PGVector,
        Qdrant,
        Redis,
        SupabaseVectorStore,
        TencentVectorDB,
        TimescaleVector,
        Vectara,
        Weaviate,
    )
    from langchain_community.vectorstores import (
        Chroma as CommunityChroma,
    )
    from langchain_community.vectorstores import (
        ElasticsearchStore as ElasticsearchStoreCommunity,
    )
    from langchain_community.vectorstores import (
        MongoDBAtlasVectorSearch as CommunityMongoDBAtlasVectorSearch,
    )
    from langchain_community.vectorstores import (
        Pinecone as CommunityPinecone,
    )
    # Plain type -> translator mapping for stores whose translator needs no
    # constructor arguments.
    builtin_translators: dict[type[VectorStore], type[Visitor]] = {
        AstraDB: AstraDBTranslator,
        PGVector: PGVectorTranslator,
        CommunityPinecone: PineconeTranslator,
        CommunityChroma: ChromaTranslator,
        DashVector: DashvectorTranslator,
        Dingo: DingoDBTranslator,
        Weaviate: WeaviateTranslator,
        Vectara: VectaraTranslator,
        Qdrant: QdrantTranslator,
        MyScale: MyScaleTranslator,
        DeepLake: DeepLakeTranslator,
        ElasticsearchStoreCommunity: ElasticsearchTranslator,
        Milvus: MilvusTranslator,
        SupabaseVectorStore: SupabaseVectorTranslator,
        TimescaleVector: TimescaleVectorTranslator,
        OpenSearchVectorSearch: OpenSearchTranslator,
        CommunityMongoDBAtlasVectorSearch: MongoDBAtlasTranslator,
        Neo4jVector: Neo4jTranslator,
    }
    # Stores whose translator needs per-instance configuration are matched
    # first (note: this makes the plain MyScale dict entry above unreachable
    # for MyScale instances).
    if isinstance(vectorstore, DatabricksVectorSearch):
        return DatabricksVectorSearchTranslator()
    if isinstance(vectorstore, MyScale):
        return MyScaleTranslator(metadata_key=vectorstore.metadata_column)
    if isinstance(vectorstore, Redis):
        return RedisTranslator.from_vectorstore(vectorstore)
    if isinstance(vectorstore, TencentVectorDB):
        fields = [
            field.name for field in (vectorstore.meta_fields or []) if field.index
        ]
        return TencentVectorDBTranslator(fields)
    if vectorstore.__class__ in builtin_translators:
        return builtin_translators[vectorstore.__class__]()
    # Partner packages are optional; probe each import and, on success,
    # match the corresponding store type.
    try:
        from langchain_astradb.vectorstores import AstraDBVectorStore
    except ImportError:
        pass
    else:
        if isinstance(vectorstore, AstraDBVectorStore):
            return AstraDBTranslator()
    try:
        from langchain_elasticsearch.vectorstores import ElasticsearchStore
    except ImportError:
        pass
    else:
        if isinstance(vectorstore, ElasticsearchStore):
            return ElasticsearchTranslator()
    try:
        from langchain_pinecone import PineconeVectorStore
    except ImportError:
        pass
    else:
        if isinstance(vectorstore, PineconeVectorStore):
            return PineconeTranslator()
    try:
        from langchain_milvus import Milvus
    except ImportError:
        pass
    else:
        if isinstance(vectorstore, Milvus):
            return MilvusTranslator()
    try:
        from langchain_mongodb import MongoDBAtlasVectorSearch
    except ImportError:
        pass
    else:
        if isinstance(vectorstore, MongoDBAtlasVectorSearch):
            return MongoDBAtlasTranslator()
    try:
        from langchain_neo4j import Neo4jVector
    except ImportError:
        pass
    else:
        if isinstance(vectorstore, Neo4jVector):
            return Neo4jTranslator()
    try:
        # Trying langchain_chroma import if exists
        from langchain_chroma import Chroma
    except ImportError:
        pass
    else:
        if isinstance(vectorstore, Chroma):
            return ChromaTranslator()
    try:
        from langchain_postgres import PGVector
        from langchain_postgres import PGVectorTranslator as NewPGVectorTranslator
    except ImportError:
        pass
    else:
        if isinstance(vectorstore, PGVector):
            return NewPGVectorTranslator()
    try:
        from langchain_qdrant import QdrantVectorStore
    except ImportError:
        pass
    else:
        if isinstance(vectorstore, QdrantVectorStore):
            return QdrantTranslator(metadata_key=vectorstore.metadata_payload_key)
    try:
        # Added in langchain-community==0.2.11
        from langchain_community.query_constructors.hanavector import HanaTranslator
        from langchain_community.vectorstores import HanaDB
    except ImportError:
        pass
    else:
        if isinstance(vectorstore, HanaDB):
            return HanaTranslator()
    try:
        # Trying langchain_weaviate (weaviate v4) import if exists
        from langchain_weaviate.vectorstores import WeaviateVectorStore
    except ImportError:
        pass
    else:
        if isinstance(vectorstore, WeaviateVectorStore):
            return WeaviateTranslator()
    msg = (
        f"Self query retriever with Vector Store type {vectorstore.__class__}"
        f" not supported."
    )
    raise ValueError(msg)
class SelfQueryRetriever(BaseRetriever):
"""Self Query Retriever.
Retriever that uses a vector store and an LLM to generate the vector store queries.
"""
vectorstore: VectorStore
"""The underlying vector store from which documents will be retrieved."""
query_constructor: Runnable[dict, StructuredQuery] = Field(alias="llm_chain")
"""The query constructor chain for generating the vector store queries.
llm_chain is legacy name kept for backwards compatibility."""
search_type: str = "similarity"
"""The search type to perform on the vector store."""
search_kwargs: dict = Field(default_factory=dict)
"""Keyword arguments to pass in to the vector store search."""
structured_query_translator: Visitor
"""Translator for turning internal query language into `VectorStore` search params.""" # noqa: E501
verbose: bool = False
use_original_query: bool = False
"""Use original query instead of the revised new query from LLM"""
model_config = ConfigDict(
populate_by_name=True,
arbitrary_types_allowed=True,
)
@model_validator(mode="before")
@classmethod
def validate_translator(cls, values: dict) -> Any:
"""Validate translator."""
if "structured_query_translator" not in values:
values["structured_query_translator"] = _get_builtin_translator(
values["vectorstore"],
)
return values
    @property
    def llm_chain(self) -> Runnable:
        """llm_chain is legacy name kept for backwards compatibility.

        Returns the ``query_constructor`` runnable.
        """
        return self.query_constructor
def _prepare_query(
self,
query: str,
structured_query: StructuredQuery,
) -> tuple[str, dict[str, Any]]:
new_query, new_kwargs = self.structured_query_translator.visit_structured_query(
structured_query,
)
if structured_query.limit is not None:
new_kwargs["k"] = structured_query.limit
if self.use_original_query:
new_query = query
search_kwargs = {**self.search_kwargs, **new_kwargs}
return new_query, search_kwargs
    def _get_docs_with_query(
        self,
        query: str,
        search_kwargs: dict[str, Any],
    ) -> list[Document]:
        # Delegate to the vector store using the configured search type.
        return self.vectorstore.search(query, self.search_type, **search_kwargs)
    async def _aget_docs_with_query(
        self,
        query: str,
        search_kwargs: dict[str, Any],
    ) -> list[Document]:
        # Async twin of _get_docs_with_query.
        return await self.vectorstore.asearch(query, self.search_type, **search_kwargs)
@override
def _get_relevant_documents(
self,
query: str,
*,
run_manager: CallbackManagerForRetrieverRun,
) -> list[Document]:
structured_query = self.query_constructor.invoke(
{"query": query},
config={"callbacks": run_manager.get_child()},
)
if self.verbose:
logger.info("Generated Query: %s", structured_query)
new_query, search_kwargs = self._prepare_query(query, structured_query)
return self._get_docs_with_query(new_query, search_kwargs)
    @override
    async def _aget_relevant_documents(
        self,
        query: str,
        *,
        run_manager: AsyncCallbackManagerForRetrieverRun,
    ) -> list[Document]:
        # Async twin of _get_relevant_documents: generate the structured query
        # with the LLM, translate it, then search the vector store.
        structured_query = await self.query_constructor.ainvoke(
            {"query": query},
            config={"callbacks": run_manager.get_child()},
        )
        if self.verbose:
            logger.info("Generated Query: %s", structured_query)
        new_query, search_kwargs = self._prepare_query(query, structured_query)
        return await self._aget_docs_with_query(new_query, search_kwargs)
@classmethod
def from_llm(
cls,
llm: BaseLanguageModel,
vectorstore: VectorStore,
document_contents: str,
metadata_field_info: Sequence[AttributeInfo | dict],
structured_query_translator: Visitor | None = None,
chain_kwargs: dict | None = None,
enable_limit: bool = False, # noqa: FBT001,FBT002
use_original_query: bool = False, # noqa: FBT001,FBT002
**kwargs: Any,
) -> "SelfQueryRetriever":
"""Create a SelfQueryRetriever from an LLM and a vector store.
Args:
llm: The language model to use for generating queries.
vectorstore: The vector store to use for retrieving documents.
document_contents: Description of the page contents of the document to be
queried.
metadata_field_info: Metadata field information for the documents.
structured_query_translator: Optional translator for turning internal query
language into `VectorStore` search params.
chain_kwargs: Additional keyword arguments for the query constructor.
enable_limit: Whether to enable the limit operator.
use_original_query: Whether to use the original query instead of the revised
query from the LLM.
**kwargs: Additional keyword arguments for the SelfQueryRetriever.
Returns:
An instance of SelfQueryRetriever.
"""
if structured_query_translator is None:
structured_query_translator = _get_builtin_translator(vectorstore)
chain_kwargs = chain_kwargs or {}
if (
"allowed_comparators" not in chain_kwargs
and structured_query_translator.allowed_comparators is not None
):
chain_kwargs["allowed_comparators"] = (
structured_query_translator.allowed_comparators
)
if (
"allowed_operators" not in chain_kwargs
and structured_query_translator.allowed_operators is not None
):
chain_kwargs["allowed_operators"] = (
structured_query_translator.allowed_operators
)
query_constructor = load_query_constructor_runnable(
llm,
document_contents,
metadata_field_info,
enable_limit=enable_limit,
**chain_kwargs,
)
query_constructor = query_constructor.with_config(
run_name=QUERY_CONSTRUCTOR_RUN_NAME,
)
return cls(
query_constructor=query_constructor,
vectorstore=vectorstore,
use_original_query=use_original_query,
structured_query_translator=structured_query_translator,
**kwargs,
) | python | github | https://github.com/langchain-ai/langchain | libs/langchain/langchain_classic/retrievers/self_query/base.py |
//===- bolt/Passes/ThreeWayBranch.cpp -------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the ThreeWayBranch class.
//
//===----------------------------------------------------------------------===//
#include "bolt/Passes/ThreeWayBranch.h"
using namespace llvm;
namespace llvm {
namespace bolt {
// A function is eligible for this pass only if it contains no packed
// (bundled) instructions anywhere; rewriting conditional branches around
// packed instructions is not safe.
bool ThreeWayBranch::shouldRunOnFunction(BinaryFunction &Function) {
  BinaryContext &BC = Function.getBinaryContext();
  bool HasPacked = false;
  for (const BinaryBasicBlock &Block : Function) {
    for (const MCInst &Instruction : Block) {
      if (BC.MIB->isPacked(Instruction)) {
        HasPacked = true;
        break;
      }
    }
    if (HasPacked)
      break;
  }
  return !HasPacked;
}
// Reorders the two conditional branches forming a "three way branch": a hot
// block BB ends in a conditional jump, and one of its successors consists of
// a single conditional jump, so together the pair dispatches to one of three
// endpoint blocks. The endpoints are sorted by profile execution count and
// the branch conditions are rewritten so the hottest endpoint is reached
// with the fewest taken branches.
void ThreeWayBranch::runOnFunction(BinaryFunction &Function) {
  BinaryContext &BC = Function.getBinaryContext();
  MCContext *Ctx = BC.Ctx.get();
  // New blocks will be added and layout will change,
  // so make a copy here to iterate over the original layout
  BinaryFunction::BasicBlockOrderType BlockLayout(
      Function.getLayout().block_begin(), Function.getLayout().block_end());
  for (BinaryBasicBlock *BB : BlockLayout) {
    // The block must be hot
    if (BB->getExecutionCount() == 0 ||
        BB->getExecutionCount() == BinaryBasicBlock::COUNT_NO_PROFILE)
      continue;
    // with two successors
    if (BB->succ_size() != 2)
      continue;
    // no jump table
    if (BB->hasJumpTable())
      continue;
    BinaryBasicBlock *FalseSucc = BB->getConditionalSuccessor(false);
    BinaryBasicBlock *TrueSucc = BB->getConditionalSuccessor(true);
    // One of BB's successors must have only one instruction that is a
    // conditional jump
    if ((FalseSucc->succ_size() != 2 || FalseSucc->size() != 1) &&
        (TrueSucc->succ_size() != 2 || TrueSucc->size() != 1))
      continue;
    // SecondBranch has the second conditional jump
    BinaryBasicBlock *SecondBranch = FalseSucc;
    BinaryBasicBlock *FirstEndpoint = TrueSucc;
    if (FalseSucc->succ_size() != 2) {
      SecondBranch = TrueSucc;
      FirstEndpoint = FalseSucc;
    }
    // The second branch selects between the remaining two endpoints.
    BinaryBasicBlock *SecondEndpoint =
        SecondBranch->getConditionalSuccessor(false);
    BinaryBasicBlock *ThirdEndpoint =
        SecondBranch->getConditionalSuccessor(true);
    // Make sure we can modify the jump in SecondBranch without disturbing any
    // other paths
    if (SecondBranch->pred_size() != 1)
      continue;
    // Get Jump Instructions
    MCInst *FirstJump = BB->getLastNonPseudoInstr();
    MCInst *SecondJump = SecondBranch->getLastNonPseudoInstr();
    // Get condition codes
    // FirstCC is normalized to be the condition under which control reaches
    // FirstEndpoint directly from BB (inverted if SecondBranch is on the
    // taken side).
    unsigned FirstCC = BC.MIB->getCondCode(*FirstJump);
    if (SecondBranch != FalseSucc)
      FirstCC = BC.MIB->getInvertedCondCode(FirstCC);
    // ThirdCC = ThirdCond && !FirstCC = !(!ThirdCond ||
    // !(!FirstCC)) = !(!ThirdCond || FirstCC)
    unsigned ThirdCC =
        BC.MIB->getInvertedCondCode(BC.MIB->getCondCodesLogicalOr(
            BC.MIB->getInvertedCondCode(BC.MIB->getCondCode(*SecondJump)),
            FirstCC));
    // SecondCC = !ThirdCond && !FirstCC = !(!(!ThirdCond) ||
    // !(!FirstCC)) = !(ThirdCond || FirstCC)
    unsigned SecondCC =
        BC.MIB->getInvertedCondCode(BC.MIB->getCondCodesLogicalOr(
            BC.MIB->getCondCode(*SecondJump), FirstCC));
    // If any combined condition cannot be expressed as a single condition
    // code, the reordering is not possible for this triple.
    if (!BC.MIB->isValidCondCode(FirstCC) ||
        !BC.MIB->isValidCondCode(ThirdCC) || !BC.MIB->isValidCondCode(SecondCC))
      continue;
    // Pair each endpoint with the condition that selects it, then sort by
    // ascending hotness; Blocks[2] is the hottest endpoint.
    std::vector<std::pair<BinaryBasicBlock *, unsigned>> Blocks;
    Blocks.push_back(std::make_pair(FirstEndpoint, FirstCC));
    Blocks.push_back(std::make_pair(SecondEndpoint, SecondCC));
    Blocks.push_back(std::make_pair(ThirdEndpoint, ThirdCC));
    llvm::sort(Blocks, [&](const std::pair<BinaryBasicBlock *, unsigned> A,
                           const std::pair<BinaryBasicBlock *, unsigned> B) {
      return A.first->getExecutionCount() < B.first->getExecutionCount();
    });
    // The rewritten SecondBranch will dispatch between the two coldest
    // endpoints, so its new count is the sum of their counts.
    uint64_t NewSecondBranchCount = Blocks[1].first->getExecutionCount() +
                                    Blocks[0].first->getExecutionCount();
    bool SecondBranchBigger =
        NewSecondBranchCount > Blocks[2].first->getExecutionCount();
    // Re-add BB's successors hottest-first so the hotter edge is taken.
    BB->removeAllSuccessors();
    if (SecondBranchBigger) {
      BB->addSuccessor(Blocks[2].first, Blocks[2].first->getExecutionCount());
      BB->addSuccessor(SecondBranch, NewSecondBranchCount);
    } else {
      BB->addSuccessor(SecondBranch, NewSecondBranchCount);
      BB->addSuccessor(Blocks[2].first, Blocks[2].first->getExecutionCount());
    }
    // Remove and add so there is no duplicate successors
    SecondBranch->removeAllSuccessors();
    SecondBranch->addSuccessor(Blocks[0].first,
                               Blocks[0].first->getExecutionCount());
    SecondBranch->addSuccessor(Blocks[1].first,
                               Blocks[1].first->getExecutionCount());
    SecondBranch->setExecutionCount(NewSecondBranchCount);
    // Replace the branch condition to fallthrough for the most common block
    if (SecondBranchBigger)
      BC.MIB->replaceBranchCondition(*FirstJump, Blocks[2].first->getLabel(),
                                     Ctx, Blocks[2].second);
    else
      BC.MIB->replaceBranchCondition(
          *FirstJump, SecondBranch->getLabel(), Ctx,
          BC.MIB->getInvertedCondCode(Blocks[2].second));
    // Replace the branch condition to fallthrough for the second most common
    // block
    BC.MIB->replaceBranchCondition(*SecondJump, Blocks[0].first->getLabel(),
                                   Ctx, Blocks[0].second);
    ++BranchesAltered;
  }
}
// Pass entry point: apply the transformation to every eligible function in
// the binary and report how many branch triples were reordered.
Error ThreeWayBranch::runOnFunctions(BinaryContext &BC) {
  for (auto &BFIt : BC.getBinaryFunctions()) {
    BinaryFunction &BF = BFIt.second;
    if (shouldRunOnFunction(BF))
      runOnFunction(BF);
  }
  BC.outs() << "BOLT-INFO: number of three way branches order changed: "
            << BranchesAltered << "\n";
  return Error::success();
}
} // end namespace bolt
} // end namespace llvm | cpp | github | https://github.com/llvm/llvm-project | bolt/lib/Passes/ThreeWayBranch.cpp |
//! Contains `ParseSess` which holds state living beyond what one `Parser` might.
//! It also serves as an input to the parser itself.
use std::str;
use std::sync::Arc;
use rustc_ast::attr::AttrIdGenerator;
use rustc_ast::node_id::NodeId;
use rustc_data_structures::fx::{FxHashMap, FxIndexMap, FxIndexSet};
use rustc_data_structures::sync::{AppendOnlyVec, Lock};
use rustc_errors::annotate_snippet_emitter_writer::AnnotateSnippetEmitter;
use rustc_errors::emitter::{EmitterWithNote, stderr_destination};
use rustc_errors::translation::Translator;
use rustc_errors::{
BufferedEarlyLint, ColorConfig, DecorateDiagCompat, Diag, DiagCtxt, DiagCtxtHandle,
DiagMessage, EmissionGuarantee, MultiSpan, StashKey,
};
use rustc_feature::{GateIssue, UnstableFeatures, find_feature_issue};
use rustc_span::edition::Edition;
use rustc_span::hygiene::ExpnId;
use rustc_span::source_map::{FilePathMapping, SourceMap};
use rustc_span::{Span, Symbol, sym};
use crate::Session;
use crate::config::{Cfg, CheckCfg};
use crate::errors::{
CliFeatureDiagnosticHelp, FeatureDiagnosticForIssue, FeatureDiagnosticHelp,
FeatureDiagnosticSuggestion, FeatureGateError, SuggestUpgradeCompiler,
};
use crate::lint::builtin::UNSTABLE_SYNTAX_PRE_EXPANSION;
use crate::lint::{Lint, LintId};
/// Collected spans during parsing for places where a certain feature was
/// used and should be feature gated accordingly in `check_crate`.
#[derive(Default)]
pub struct GatedSpans {
    /// Maps a feature name (the `Symbol` used in `unstable.rs`) to every span
    /// where that feature was used during parsing.
    pub spans: Lock<FxHashMap<Symbol, Vec<Span>>>,
}
impl GatedSpans {
    /// Record a use of `feature` (the same `Symbol` used in `unstable.rs`)
    /// at `span`, to be gate-checked later in `check_crate`.
    pub fn gate(&self, feature: Symbol, span: Span) {
        let mut spans = self.spans.borrow_mut();
        spans.entry(feature).or_default().push(span);
    }

    /// Ungate the last span under the given `feature`.
    /// Panics if the given `span` wasn't the last one.
    ///
    /// Using this is discouraged unless you have a really good reason to.
    pub fn ungate_last(&self, feature: Symbol, span: Span) {
        let mut spans = self.spans.borrow_mut();
        let last_gated = spans.entry(feature).or_default().pop().unwrap();
        debug_assert_eq!(span, last_gated);
    }

    /// Prepend the given set of `spans` onto the set in `self`: our spans end
    /// up appended after the given ones, and the combined map replaces ours.
    pub fn merge(&self, mut spans: FxHashMap<Symbol, Vec<Span>>) {
        let mut current = self.spans.borrow_mut();
        // The entries will be moved to another map so the drain order does
        // not matter.
        #[allow(rustc::potential_query_instability)]
        for (feature, mut feature_spans) in current.drain() {
            spans.entry(feature).or_default().append(&mut feature_spans);
        }
        *current = spans;
    }
}
/// Records every symbol encountered during parsing together with the span of
/// its first occurrence.
#[derive(Default)]
pub struct SymbolGallery {
    /// All symbols occurred and their first occurrence span.
    pub symbols: Lock<FxIndexMap<Symbol, Span>>,
}
impl SymbolGallery {
    /// Record `symbol` as occurring at `span`.
    /// Only the first occurrence is kept; later insertions of the same symbol
    /// are ignored.
    pub fn insert(&self, symbol: Symbol, span: Span) {
        let mut symbols = self.symbols.lock();
        symbols.entry(symbol).or_insert_with(|| span);
    }
}
// TODO: this function now accepts `Session` instead of `ParseSess` and should be relocated
/// Construct a diagnostic for a language feature error due to the given `span`.
/// The `feature`'s `Symbol` is the one you used in `unstable.rs` and `rustc_span::symbol`.
///
/// Convenience wrapper around [`feature_err_issue`] with [`GateIssue::Language`].
#[track_caller]
pub fn feature_err(
    sess: &Session,
    feature: Symbol,
    span: impl Into<MultiSpan>,
    explain: impl Into<DiagMessage>,
) -> Diag<'_> {
    feature_err_issue(sess, feature, span, GateIssue::Language, explain)
}
/// Construct a diagnostic for a feature gate error.
///
/// This variant allows you to control whether it is a library or language feature.
/// Almost always, you want to use this for a language feature. If so, prefer `feature_err`.
#[track_caller]
pub fn feature_err_issue(
    sess: &Session,
    feature: Symbol,
    span: impl Into<MultiSpan>,
    issue: GateIssue,
    explain: impl Into<DiagMessage>,
) -> Diag<'_> {
    let span = span.into();

    // An earlier stashed warning for this span is about to be superseded by a
    // hard error, so cancel it if present.
    if let Some(primary) = span.primary_span() {
        if let Some(warning) = sess.dcx().steal_non_err(primary, StashKey::EarlySyntaxWarning) {
            warning.cancel()
        }
    }

    let mut err = sess.dcx().create_err(FeatureGateError { span, explain: explain.into() });
    add_feature_diagnostics_for_issue(&mut err, sess, feature, issue, false, None);
    err
}
/// Construct a future incompatibility diagnostic for a feature gate.
///
/// This diagnostic is only a warning and *does not cause compilation to fail*.
///
/// Convenience wrapper around [`feature_warn_issue`] with [`GateIssue::Language`].
#[track_caller]
pub fn feature_warn(sess: &Session, feature: Symbol, span: Span, explain: &'static str) {
    feature_warn_issue(sess, feature, span, GateIssue::Language, explain);
}
/// Construct a future incompatibility diagnostic for a feature gate.
///
/// This diagnostic is only a warning and *does not cause compilation to fail*.
///
/// This variant allows you to control whether it is a library or language feature.
/// Almost always, you want to use this for a language feature. If so, prefer `feature_warn`.
#[track_caller]
pub fn feature_warn_issue(
    sess: &Session,
    feature: Symbol,
    span: Span,
    issue: GateIssue,
    explain: &'static str,
) {
    let mut diag = sess.dcx().struct_span_warn(span, explain);
    add_feature_diagnostics_for_issue(&mut diag, sess, feature, issue, false, None);

    // Decorate this as a future-incompatibility lint as in rustc_middle::lint::lint_level
    let lint = UNSTABLE_SYNTAX_PRE_EXPANSION;
    let future_incompatible = lint.future_incompatible.as_ref().unwrap();
    diag.is_lint(lint.name_lower(), /* has_future_breakage */ false);
    diag.warn(lint.desc);
    diag.note(format!("for more information, see {}", future_incompatible.reason.reference()));

    // A later feature_err call can steal and cancel this warning.
    diag.stash(span, StashKey::EarlySyntaxWarning);
}
/// Adds the diagnostics for a feature to an existing error.
/// Must be a language feature!
///
/// Convenience wrapper around [`add_feature_diagnostics_for_issue`] with
/// [`GateIssue::Language`], no CLI origin, and no injection span.
pub fn add_feature_diagnostics<G: EmissionGuarantee>(
    err: &mut Diag<'_, G>,
    sess: &Session,
    feature: Symbol,
) {
    add_feature_diagnostics_for_issue(err, sess, feature, GateIssue::Language, false, None);
}
/// Adds the diagnostics for a feature to an existing error.
///
/// This variant allows you to control whether it is a library or language feature.
/// Almost always, you want to use this for a language feature. If so, prefer
/// `add_feature_diagnostics`.
pub fn add_feature_diagnostics_for_issue<G: EmissionGuarantee>(
    err: &mut Diag<'_, G>,
    sess: &Session,
    feature: Symbol,
    issue: GateIssue,
    feature_from_cli: bool,
    inject_span: Option<Span>,
) {
    // Link to the tracking issue, when one exists for this feature.
    if let Some(n) = find_feature_issue(feature, issue) {
        err.subdiagnostic(FeatureDiagnosticForIssue { n });
    }

    // #23973: do not suggest `#![feature(...)]` if we are in beta/stable
    if !sess.psess.unstable_features.is_nightly_build() {
        return;
    }

    // Tell the user how to enable the feature, preferring the most actionable
    // form available: CLI flag, concrete suggestion at a span, or plain help.
    if feature_from_cli {
        err.subdiagnostic(CliFeatureDiagnosticHelp { feature });
    } else if let Some(span) = inject_span {
        err.subdiagnostic(FeatureDiagnosticSuggestion { feature, span });
    } else {
        err.subdiagnostic(FeatureDiagnosticHelp { feature });
    }

    if feature == sym::rustc_attrs {
        // We're unlikely to stabilize something out of `rustc_attrs`
        // without at least renaming it, so pointing out how old
        // the compiler is will do little good.
    } else if sess.opts.unstable_opts.ui_testing {
        err.subdiagnostic(SuggestUpgradeCompiler::ui_testing());
    } else if let Some(suggestion) = SuggestUpgradeCompiler::new() {
        err.subdiagnostic(suggestion);
    }
}
/// This is only used by unstable_feature_bound as it does not have issue number information for now.
/// This is basically the same as `feature_err_issue`
/// but without the feature issue note. If we can do a lookup for issue number from feature name,
/// then we should directly use `feature_err_issue` for ambiguity error of
/// `#[unstable_feature_bound]`.
#[track_caller]
pub fn feature_err_unstable_feature_bound(
sess: &Session,
feature: Symbol,
span: impl Into<MultiSpan>,
explain: impl Into<DiagMessage>,
) -> Diag<'_> {
let span = span.into();
// Cancel an earlier warning for this same error, if it exists.
if let Some(span) = span.primary_span() {
if let Some(err) = sess.dcx().steal_non_err(span, StashKey::EarlySyntaxWarning) {
err.cancel()
}
}
let mut err = sess.dcx().create_err(FeatureGateError { span, explain: explain.into() });
// #23973: do not suggest `#![feature(...)]` if we are in beta/stable
if sess.psess.unstable_features.is_nightly_build() {
err.subdiagnostic(FeatureDiagnosticHelp { feature });
if feature == sym::rustc_attrs {
// We're unlikely to stabilize something out of `rustc_attrs`
// without at least renaming it, so pointing out how old
// the compiler is will do little good.
} else if sess.opts.unstable_opts.ui_testing {
err.subdiagnostic(SuggestUpgradeCompiler::ui_testing());
} else if let Some(suggestion) = SuggestUpgradeCompiler::new() {
err.subdiagnostic(suggestion);
}
}
err
}
/// Info about a parsing session.
pub struct ParseSess {
    /// Diagnostic context used to create and emit errors/warnings during parsing.
    dcx: DiagCtxt,
    /// Whether unstable features are allowed (nightly) or not.
    pub unstable_features: UnstableFeatures,
    /// The `--cfg` configuration in effect.
    pub config: Cfg,
    /// The `--check-cfg` configuration in effect.
    pub check_config: CheckCfg,
    /// Edition the crate is being parsed under.
    pub edition: Edition,
    /// Places where raw identifiers were used. This is used to avoid complaining about idents
    /// clashing with keywords in new editions.
    pub raw_identifier_spans: AppendOnlyVec<Span>,
    /// Places where identifiers that contain invalid Unicode codepoints but that look like they
    /// should be. Useful to avoid bad tokenization when encountering emoji. We group them to
    /// provide a single error per unique incorrect identifier.
    pub bad_unicode_identifiers: Lock<FxIndexMap<Symbol, Vec<Span>>>,
    /// Owned handle to the source map; exposed via [`Self::source_map`] and
    /// [`Self::clone_source_map`].
    source_map: Arc<SourceMap>,
    /// Lints buffered during parsing, to be emitted once lint levels are known.
    pub buffered_lints: Lock<Vec<BufferedEarlyLint>>,
    /// Contains the spans of block expressions that could have been incomplete based on the
    /// operation token that followed it, but that the parser cannot identify without further
    /// analysis.
    pub ambiguous_block_expr_parse: Lock<FxIndexMap<Span, Span>>,
    /// Feature-gated spans collected during parsing (see [`GatedSpans`]).
    pub gated_spans: GatedSpans,
    /// First-occurrence spans of all symbols seen (see [`SymbolGallery`]).
    pub symbol_gallery: SymbolGallery,
    /// Environment variables accessed during the build and their values when they exist.
    pub env_depinfo: Lock<FxIndexSet<(Symbol, Option<Symbol>)>>,
    /// File paths accessed during the build.
    pub file_depinfo: Lock<FxIndexSet<Symbol>>,
    /// Whether cfg(version) should treat the current release as incomplete
    pub assume_incomplete_release: bool,
    /// Spans passed to `proc_macro::quote_span`. Each span has a numerical
    /// identifier represented by its position in the vector.
    proc_macro_quoted_spans: AppendOnlyVec<Span>,
    /// Used to generate new `AttrId`s. Every `AttrId` is unique.
    pub attr_id_generator: AttrIdGenerator,
}
impl ParseSess {
    /// Used for testing.
    pub fn new() -> Self {
        let translator = Translator::new();
        let sm = Arc::new(SourceMap::new(FilePathMapping::empty()));
        let emitter = Box::new(
            AnnotateSnippetEmitter::new(stderr_destination(ColorConfig::Auto), translator)
                .sm(Some(Arc::clone(&sm))),
        );
        let dcx = DiagCtxt::new(emitter);
        ParseSess::with_dcx(dcx, sm)
    }

    /// Creates a `ParseSess` with default (mostly empty) state around the
    /// given diagnostic context and source map.
    pub fn with_dcx(dcx: DiagCtxt, source_map: Arc<SourceMap>) -> Self {
        Self {
            dcx,
            unstable_features: UnstableFeatures::from_environment(None),
            config: Cfg::default(),
            check_config: CheckCfg::default(),
            edition: ExpnId::root().expn_data().edition,
            raw_identifier_spans: Default::default(),
            bad_unicode_identifiers: Lock::new(Default::default()),
            source_map,
            buffered_lints: Lock::new(vec![]),
            ambiguous_block_expr_parse: Lock::new(Default::default()),
            gated_spans: GatedSpans::default(),
            symbol_gallery: SymbolGallery::default(),
            env_depinfo: Default::default(),
            file_depinfo: Default::default(),
            assume_incomplete_release: false,
            proc_macro_quoted_spans: Default::default(),
            attr_id_generator: AttrIdGenerator::new(),
        }
    }

    /// Creates a `ParseSess` whose emitter appends `note` to diagnostics.
    // NOTE(review): unlike `new`, the inner `AnnotateSnippetEmitter` here is
    // not given the source map (`.sm(...)` is not called), so diagnostics may
    // lack source snippets — confirm whether `EmitterWithNote` supplies it.
    pub fn emitter_with_note(note: String) -> Self {
        let translator = Translator::new();
        let sm = Arc::new(SourceMap::new(FilePathMapping::empty()));
        let emitter = Box::new(AnnotateSnippetEmitter::new(
            stderr_destination(ColorConfig::Auto),
            translator,
        ));
        let dcx = DiagCtxt::new(Box::new(EmitterWithNote { emitter, note }));
        ParseSess::with_dcx(dcx, sm)
    }

    /// Returns a reference to the source map.
    #[inline]
    pub fn source_map(&self) -> &SourceMap {
        &self.source_map
    }

    /// Returns a new `Arc` handle to the source map.
    pub fn clone_source_map(&self) -> Arc<SourceMap> {
        Arc::clone(&self.source_map)
    }

    /// Buffers an early lint at `span` for `node_id`, to be emitted later
    /// once lint levels are known.
    pub fn buffer_lint(
        &self,
        lint: &'static Lint,
        span: impl Into<MultiSpan>,
        node_id: NodeId,
        diagnostic: impl Into<DecorateDiagCompat>,
    ) {
        self.opt_span_buffer_lint(lint, Some(span.into()), node_id, diagnostic.into())
    }

    /// Like [`Self::buffer_lint`], but the span is optional.
    pub(crate) fn opt_span_buffer_lint(
        &self,
        lint: &'static Lint,
        span: Option<MultiSpan>,
        node_id: NodeId,
        diagnostic: DecorateDiagCompat,
    ) {
        self.buffered_lints.with_lock(|buffered_lints| {
            buffered_lints.push(BufferedEarlyLint {
                span,
                node_id,
                lint_id: LintId::of(lint),
                diagnostic,
            });
        });
    }

    /// Stores `span` for `proc_macro::quote_span` and returns its index.
    pub fn save_proc_macro_span(&self, span: Span) -> usize {
        self.proc_macro_quoted_spans.push(span)
    }

    /// Iterates over all spans saved via [`Self::save_proc_macro_span`],
    /// paired with their indices.
    pub fn proc_macro_quoted_spans(&self) -> impl Iterator<Item = (usize, Span)> {
        // This is equivalent to `.iter().copied().enumerate()`, but that isn't possible for
        // AppendOnlyVec, so we resort to this scheme.
        self.proc_macro_quoted_spans.iter_enumerated()
    }

    /// Returns a handle to the diagnostic context.
    pub fn dcx(&self) -> DiagCtxtHandle<'_> {
        self.dcx.handle()
    }
}
/*
* Copyright 2014-2026 JetBrains s.r.o and contributors. Use of this source code is governed by the Apache 2.0 license.
*/
package io.ktor.server.auth
import io.ktor.http.HttpHeaders
import io.ktor.http.HttpMethod
import io.ktor.http.auth.*
import io.ktor.server.application.*
import io.ktor.server.request.*
import io.ktor.server.response.*
import io.ktor.util.*
import io.ktor.utils.io.charsets.*
import java.security.MessageDigest
/**
 * A `digest` [Authentication] provider.
 *
 * This provider supports:
 * - Multiple hash algorithms: MD5, SHA-256, SHA-512-256 (and their session variants)
 * - Quality of Protection (qop): auth and auth-int
 * - User hash for privacy protection
 * - UTF-8 charset support
 *
 * **Security Note**: SHA-512-256 is the recommended hash algorithm for new implementations.
 * While MD5 is supported for backward compatibility, it is deprecated and should be avoided
 * in production. Consider enabling [Config.strictRfc7616Mode] to enforce stronger algorithms.
 *
 * [Report a problem](https://ktor.io/feedback/?fqname=io.ktor.server.auth.DigestAuthenticationProvider)
 *
 * @property realm specifies the value to be passed in the `WWW-Authenticate` header.
 */
public class DigestAuthenticationProvider internal constructor(
    config: Config
) : AuthenticationProvider(config) {
    // Challenge parameters and callbacks captured from the configuration at construction time.
    private val realm: String = config.realm
    private val algorithms: List<DigestAlgorithm> = config.algorithms.also {
        require(it.isNotEmpty()) { "At least one algorithm must be specified" }
    }
    private val qopValues = config.supportedQop.map { it.value }
    private val charset: Charset = config.charset
    private val userHashResolver: UserHashResolverFunction? = config.userHashResolver
    private val nonceManager: NonceManager = config.nonceManager
    private val userNameRealmPasswordDigestProvider: DigestProviderFunctionV2 = requireNotNull(config.digestProvider) {
        "Digest provider function should be specified"
    }
    private val authenticationFunction: AuthenticationFunction<DigestCredential> = config.authenticationFunction
    override suspend fun onAuthenticate(context: AuthenticationContext) {
        val call = context.call
        // Parse the Authorization header into digest credentials, if the header
        // carries a parameterized `Digest` scheme.
        val authorizationHeader = call.request.parseAuthorizationHeader()
        val credentials = authorizationHeader?.let { authHeader ->
            if (authHeader.authScheme == AuthScheme.Digest && authHeader is HttpAuthHeader.Parameterized) {
                authHeader.toDigestCredential(defaultCharset = charset)
            } else {
                null
            }
        }
        // Store HA1 for use in the Authentication-Info header
        var verifiedHa1: ByteArray? = null
        var verifiedBodyHash: ByteArray? = null
        // Verifies the client's response hash; as a side effect captures HA1 and
        // (for auth-int) the request body hash for the Authentication-Info header.
        val verify: suspend (DigestCredential) -> Boolean = { credential ->
            val userDigest =
                userNameRealmPasswordDigestProvider(credential.userName, credential.realm, credential.digestAlgorithm)
            // A null digest means "unknown user"; HA1 is still computed with an
            // empty digest so timing does not reveal user existence.
            val ha1 = credential.computeHA1(userNameRealmPasswordDigest = userDigest ?: ByteArray(0))
            val entityBodyHash = when {
                credentials?.qop == DigestQop.AUTH_INT.value -> call.computeBodyHash(credential.digester)
                else -> null
            }
            verifiedHa1 = ha1
            verifiedBodyHash = entityBodyHash
            credential.verifyWithHA1(call.request.local.method, ha1, entityBodyHash) && userDigest != null
        }
        // When the client sent `userhash=true`, map the hashed username back to the
        // actual one; returns null (authentication fails) if no resolver is configured.
        suspend fun DigestCredential.resolveUserHash(): DigestCredential? {
            if (!userHash) return this
            val userName = userHashResolver?.invoke(userName, realm, digestAlgorithm) ?: return null
            return copy(userName = userName, userHash = false)
        }
        // Authenticate only when algorithm, realm, nonce, and qop all check out and
        // the digest itself verifies.
        // NOTE(review): `algorithms.any { it === ... }` is an identity comparison;
        // assumes DigestAlgorithm values are canonical singletons — confirm.
        val principal = credentials?.let { c ->
            val credential = c.resolveUserHash() ?: return@let null
            if (algorithms.any { it === credential.digestAlgorithm } &&
                credential.realm == realm &&
                nonceManager.verifyNonce(credential.nonce) &&
                validateQop(credential.qop) &&
                verify(credential)
            ) {
                call.authenticationFunction(credential)
            } else {
                null
            }
        }
        when (principal) {
            null -> {
                val cause = when (credentials) {
                    null -> AuthenticationFailedCause.NoCredentials
                    else -> AuthenticationFailedCause.InvalidCredentials
                }
                @Suppress("NAME_SHADOWING")
                context.challenge(digestAuthenticationChallengeKey, cause) { challenge, call ->
                    val supportsUserHash = userHashResolver != null
                    // One WWW-Authenticate challenge per configured algorithm, so the
                    // client can pick the strongest it supports.
                    val challenges = algorithms.map { algorithm ->
                        HttpAuthHeader.digestAuthChallenge(
                            nonce = nonceManager.newNonce(),
                            userhash = supportsUserHash,
                            charset = charset.takeIf { it == Charsets.UTF_8 }, // only UTF-8 can be advertised
                            algorithm = algorithm,
                            qop = qopValues,
                            realm = realm
                        )
                    }
                    call.respond(UnauthorizedResponse(*challenges.toTypedArray()))
                    challenge.complete()
                }
            }
            else -> {
                // Add Authentication-Info header for successful authentication when qop is used
                // NOTE(review): `credentials` is nullable here; a non-null `principal`
                // implies `credentials != null`, but this relies on a smart cast the
                // compiler may not perform — confirm this compiles as written.
                if (credentials.qop != null) {
                    val authInfo = credentials.buildAuthenticationInfoHeader(
                        ha1 = verifiedHa1!!,
                        nextNonce = nonceManager.newNonce(),
                        responseBodyHash = verifiedBodyHash
                    )
                    call.response.header(HttpHeaders.AuthenticationInfo, authInfo)
                }
                context.principal(name, principal)
            }
        }
    }
    /**
     * Validates that this server supports the client's qop value.
     *
     * Per RFC 2617 backward compatibility, if the client doesn't send qop,
     * authentication can still proceed to use the legacy format.
     */
    private fun validateQop(clientQop: String?): Boolean {
        // If the client didn't send qop, allow it for RFC 2617 backward compatibility
        // If the client sent a qop, it must be the one the server supports
        return clientQop == null || qopValues.any { it == clientQop }
    }
    /**
     * Computes the hash of the request entity body for auth-int verification.
     * A body that cannot be received hashes as empty bytes.
     */
    private suspend fun ApplicationCall.computeBodyHash(digester: MessageDigest): ByteArray {
        // Note: This consumes the body. Users should install the DoubleReceive plugin.
        val bodyBytes = runCatching { receive<ByteArray>() }.getOrNull()
            ?: ByteArray(0)
        digester.reset()
        return digester.digest(bodyBytes)
    }
    /**
     * A configuration for the [digest] authentication provider.
     *
     * [Report a problem](https://ktor.io/feedback/?fqname=io.ktor.server.auth.DigestAuthenticationProvider.Config)
     */
    public class Config internal constructor(
        name: String?,
        description: String?
    ) : AuthenticationProvider.Config(name, description) {
        internal var digestProvider: DigestProviderFunctionV2? = null
        // Default validation accepts any verified credential as a UserIdPrincipal.
        internal var authenticationFunction: AuthenticationFunction<DigestCredential> = { UserIdPrincipal(it.userName) }
        /**
         * Specifies a realm to be passed in the `WWW-Authenticate` header.
         *
         * [Report a problem](https://ktor.io/feedback/?fqname=io.ktor.server.auth.DigestAuthenticationProvider.Config.realm)
         */
        public var realm: String = "Ktor Server"
        /**
         * A message digest algorithm to be used. Usually only `MD5` is supported by clients.
         *
         * [Report a problem](https://ktor.io/feedback/?fqname=io.ktor.server.auth.DigestAuthenticationProvider.Config.algorithmName)
         */
        @Deprecated("Use algorithms instead", ReplaceWith("algorithms"))
        public var algorithmName: String
            get() = algorithms.first().hashName
            set(value) {
                val digestAlgorithm = requireNotNull(DigestAlgorithm.from(value)) {
                    "Unsupported digest algorithm: $value"
                }
                algorithms = listOf(digestAlgorithm)
            }
        /**
         * List of message digest algorithms to be used.
         *
         * Supported algorithms:
         * - `MD5` default, for backward compatibility (deprecated, avoid it in production)
         * - `MD5-sess` session variant, deprecated
         * - `SHA-256` recommended minimum for production use
         * - `SHA-256-sess` session variant
         * - `SHA-512-256` **recommended for new implementations** (provides the strongest security)
         * - `SHA-512-256-sess` session variant with the strongest security
         *
         * When multiple algorithms are configured, the server will send multiple
         * `WWW-Authenticate` headers, one per algorithm, allowing the client to choose.
         *
         * [Report a problem](https://ktor.io/feedback/?fqname=io.ktor.server.auth.DigestAuthenticationProvider.Config.algorithms)
         */
        public var algorithms: List<DigestAlgorithm> = defaultAlgorithms
        /**
         * List of supported Quality of Protection (qop) options.
         *
         * The server can advertise support for:
         * - [DigestQop.AUTH] - Authentication only (default)
         * - [DigestQop.AUTH_INT] - Authentication with integrity protection
         *
         * When [DigestQop.AUTH_INT] is used, the request body is included in the digest
         * calculation, providing integrity protection. Note that using `auth-int` requires
         * reading the request body during authentication. If you need to access the body
         * in your route handler, install the `DoubleReceive` plugin.
         *
         * An empty list means qop is not required.
         *
         * [Report a problem](https://ktor.io/feedback/?fqname=io.ktor.server.auth.DigestAuthenticationProvider.Config.supportedQop)
         */
        public var supportedQop: List<DigestQop> = listOf(DigestQop.AUTH)
        /**
         * The charset to be used. If set to `UTF-8`, it will be passed in the `WWW-Authenticate` header.
         *
         * [Report a problem](https://ktor.io/feedback/?fqname=io.ktor.server.auth.DigestAuthenticationProvider.Config.charset)
         */
        public var charset: Charset = Charsets.UTF_8
        internal var userHashResolver: UserHashResolverFunction? = null
        /**
         * [NonceManager] to be used to generate nonce values.
         *
         * [Report a problem](https://ktor.io/feedback/?fqname=io.ktor.server.auth.DigestAuthenticationProvider.Config.nonceManager)
         */
        public var nonceManager: NonceManager = GenerateOnlyNonceManager
        /**
         * Sets a validation function that checks a specified [DigestCredential] instance and
         * returns principal [Any] in a case of successful authentication or null if authentication fails.
         *
         * [Report a problem](https://ktor.io/feedback/?fqname=io.ktor.server.auth.DigestAuthenticationProvider.Config.validate)
         */
        public fun validate(body: AuthenticationFunction<DigestCredential>) {
            authenticationFunction = body
        }
        /**
         * Configures a digest provider function that should fetch or compute message digest for the specified
         * `userName` and `realm`. A message digest is usually computed based on username, realm, and password
         * concatenated with the colon character ':'. For example, `"$userName:$realm:$password"`.
         *
         * **Note**: This overload does not receive the algorithm parameter. Consider using the
         * [digestProvider] overload that accepts [DigestAlgorithm] for full RFC 7616 support.
         *
         * [Report a problem](https://ktor.io/feedback/?fqname=io.ktor.server.auth.DigestAuthenticationProvider.Config.digestProvider)
         */
        public fun digestProvider(digest: DigestProviderFunction) {
            // Adapt the legacy two-argument provider by ignoring the algorithm.
            digestProvider = { userName, realm, _ -> digest(userName, realm) }
        }
        /**
         * Configures a digest provider function that should fetch or compute message digest for the specified
         * `userName`, `realm`, and `algorithm`.
         *
         * The digest should be computed as `H(username:realm:password)` using the specified algorithm's hash function.
         *
         * @see [DigestAlgorithm] for supported algorithms
         *
         * [Report a problem](https://ktor.io/feedback/?fqname=io.ktor.server.auth.DigestAuthenticationProvider.Config.digestProvider)
         */
        public fun digestProvider(digest: DigestProviderFunctionV2) {
            digestProvider = digest
        }
        /**
         * Configures a resolver function for userhash support.
         *
         * When a client sends `userhash=true`, the username parameter contains `H(username:realm)`
         * instead of the actual username. This resolver is called to find the actual username
         * from the hash.
         *
         * When set, the server will include `userhash=true` in the WWW-Authenticate challenge header,
         * indicating to clients that they may send hashed usernames.
         *
         * Example implementation using a list of known users:
         * ```kotlin
         * val users = listOf("alice", "bob", "charlie")
         *
         * userHashResolver { userhash, realm, algorithm ->
         *     users.find { username ->
         *         computeUserHash(username, realm, algorithm) == userhash
         *     }
         * }
         * ```
         *
         * [Report a problem](https://ktor.io/feedback/?fqname=io.ktor.server.auth.DigestAuthenticationProvider.Config.userHashResolver)
         */
        public fun userHashResolver(resolver: UserHashResolverFunction) {
            userHashResolver = resolver
        }
        /**
         * Enables strict RFC 7616 compliance mode by setting the algorithms to SHA-512-256 and SHA-256, and charset to UTF-8.
         *
         * [Report a problem](https://ktor.io/feedback/?fqname=io.ktor.server.auth.DigestAuthenticationProvider.Config.strictRfc7616Mode)
         */
        public fun strictRfc7616Mode() {
            // NOTE(review): the KDoc above says this sets the algorithms to
            // SHA-512-256 and SHA-256 and the charset to UTF-8, but the body only
            // filters MD5 variants out of the current list and merely warns about a
            // non-UTF-8 charset without changing it — confirm intended behavior.
            @Suppress("DEPRECATION")
            if (DigestAlgorithm.MD5 in algorithms || DigestAlgorithm.MD5_SESS in algorithms) {
                if (algorithms !== defaultAlgorithms) {
                    LOGGER.warn("MD5 algorithms are overridden in strictRfc7616Mode")
                }
                algorithms = algorithms.filter { it != DigestAlgorithm.MD5 && it != DigestAlgorithm.MD5_SESS }
            }
            if (charset != Charsets.UTF_8) {
                LOGGER.warn("Defined charset is overridden in strictRfc7616Mode")
            }
        }
        internal companion object {
            // MD5 stays in the default list for backward compatibility with old clients.
            @Suppress("DEPRECATION")
            val defaultAlgorithms = listOf(DigestAlgorithm.SHA_512_256, DigestAlgorithm.MD5)
        }
    }
}
/**
 * Provides a message digest for the specified username and realm or returns `null` if a user is missing.
 * This function could fetch digest from a database or compute it instead.
 * Returning `null` marks the user as unknown and fails verification.
 *
 * [Report a problem](https://ktor.io/feedback/?fqname=io.ktor.server.auth.DigestProviderFunction)
 */
public typealias DigestProviderFunction = suspend (userName: String, realm: String) -> ByteArray?
/**
 * Provides a message digest for the specified username, realm, and algorithm or returns `null` if a user is missing.
 * This function could fetch digest from a database or compute it instead.
 * Returning `null` marks the user as unknown and fails verification.
 *
 * [Report a problem](https://ktor.io/feedback/?fqname=io.ktor.server.auth.DigestProviderFunctionV2)
 */
public typealias DigestProviderFunctionV2 =
    suspend (userName: String, realm: String, algorithm: DigestAlgorithm) -> ByteArray?
/**
 * Resolves a userhash to the actual username for userhash support.
 * Returning `null` means the hash does not match any known user.
 *
 * [Report a problem](https://ktor.io/feedback/?fqname=io.ktor.server.auth.UserHashResolverFunction)
 */
public typealias UserHashResolverFunction =
    suspend (userHash: String, realm: String, algorithm: DigestAlgorithm) -> String?
/**
 * Installs the digest [Authentication] provider.
 * To learn how to configure it, see [Digest authentication](https://ktor.io/docs/digest.html).
 *
 * Delegates to the overload that also accepts a description, passing `null`.
 *
 * [Report a problem](https://ktor.io/feedback/?fqname=io.ktor.server.auth.digest)
 */
public fun AuthenticationConfig.digest(
    name: String? = null,
    configure: DigestAuthenticationProvider.Config.() -> Unit
) {
    digest(name, description = null, configure)
}
/**
 * Installs the digest [Authentication] provider with description.
 * To learn how to configure it, see [Digest authentication](https://ktor.io/docs/digest.html).
 *
 * [Report a problem](https://ktor.io/feedback/?fqname=io.ktor.server.auth.digest)
 */
public fun AuthenticationConfig.digest(
    name: String? = null,
    description: String? = null,
    configure: DigestAuthenticationProvider.Config.() -> Unit
) {
    // Build and customize the configuration, then register the provider.
    val config = DigestAuthenticationProvider.Config(name, description)
    config.configure()
    register(DigestAuthenticationProvider(config))
}
// Marker key for digest authentication challenges (presumably matched when the
// challenge is completed -- not referenced in this part of the file).
private val digestAuthenticationChallengeKey: Any = "DigestAuth"
/**
 * Retrieves [DigestCredential] for this call.
 *
 * Returns `null` when the request has no `Authorization` header, when the header
 * is not parameterized, or when its scheme is not `Digest`.
 *
 * [Report a problem](https://ktor.io/feedback/?fqname=io.ktor.server.auth.digestAuthenticationCredentials)
 */
public fun ApplicationCall.digestAuthenticationCredentials(): DigestCredential? {
    val header = request.parseAuthorizationHeader() ?: return null
    if (header !is HttpAuthHeader.Parameterized) return null
    if (header.authScheme != AuthScheme.Digest) return null
    return header.toDigestCredential()
}
// Binary-compatibility shim: routes callers compiled against the zero-argument
// overload to the charset-aware variant with the historical ISO-8859-1 default.
@Deprecated("Maintained binary compatibility", level = DeprecationLevel.HIDDEN)
public fun HttpAuthHeader.Parameterized.toDigestCredential(): DigestCredential =
    toDigestCredential(defaultCharset = Charsets.ISO_8859_1)
/**
 * Verifies that credentials are valid for a given [method], [digester], and [userNameRealmPasswordDigest].
 *
 * This is the legacy verifier that does not support session algorithms or auth-int.
 *
 * @param digester must use the same algorithm as this credential's [digestAlgorithm].
 * @param userNameRealmPasswordDigest returns the stored digest for a
 *   `(userName, realm)` pair, or `null` for an unknown user.
 * @throws IllegalArgumentException if [digester] uses a different algorithm.
 *
 * [Report a problem](https://ktor.io/feedback/?fqname=io.ktor.server.auth.verifier)
 */
@Deprecated(message = "Use [DigestCredential.verifier] without digester.", level = DeprecationLevel.WARNING)
public suspend fun DigestCredential.verifier(
    method: HttpMethod,
    digester: MessageDigest,
    userNameRealmPasswordDigest: suspend (String, String) -> ByteArray?
): Boolean {
    // The digester parameter is legacy-only: validate it agrees with the
    // credential's algorithm, then delegate to the digester-free overload.
    require(digester.algorithm == digestAlgorithm.hashName) { "Wrong digest algorithm" }
    return verifier(method, userNameRealmPasswordDigest)
}
/**
 * Calculates the expected digest bytes for this [DigestCredential].
 *
 * This is the legacy function that does not support session algorithms or auth-int.
 * For full RFC 7616 support, use the overload with an algorithm parameter.
 *
 * @param digester must use the same algorithm as this credential's [digestAlgorithm].
 * @param userNameRealmPasswordDigest the stored digest of `"userName:realm:password"`.
 * @throws IllegalArgumentException if [digester] uses a different algorithm.
 *
 * [Report a problem](https://ktor.io/feedback/?fqname=io.ktor.server.auth.expectedDigest)
 */
@Deprecated(message = "Use [DigestCredential.expectedDigest] without digester.", level = DeprecationLevel.WARNING)
public fun DigestCredential.expectedDigest(
    method: HttpMethod,
    digester: MessageDigest,
    userNameRealmPasswordDigest: ByteArray
): ByteArray {
    // As in the legacy verifier, the digester only serves as a consistency
    // check before delegating to the digester-free overload.
    require(digestAlgorithm.hashName == digester.algorithm) { "Wrong digest algorithm" }
    return expectedDigest(method, userNameRealmPasswordDigest)
}
import sys
import types
import unittest
class Test_TestLoader(unittest.TestCase):
### Tests for TestLoader.loadTestsFromTestCase
################################################################
# "Return a suite of all tests cases contained in the TestCase-derived
# class testCaseClass"
def test_loadTestsFromTestCase(self):
class Foo(unittest.TestCase):
def test_1(self): pass
def test_2(self): pass
def foo_bar(self): pass
tests = unittest.TestSuite([Foo('test_1'), Foo('test_2')])
loader = unittest.TestLoader()
self.assertEqual(loader.loadTestsFromTestCase(Foo), tests)
# "Return a suite of all tests cases contained in the TestCase-derived
# class testCaseClass"
#
# Make sure it does the right thing even if no tests were found
    def test_loadTestsFromTestCase__no_matches(self):
        """A TestCase subclass with no test_* methods yields an empty suite."""
        class Foo(unittest.TestCase):
            def foo_bar(self): pass
        empty_suite = unittest.TestSuite()
        loader = unittest.TestLoader()
        self.assertEqual(loader.loadTestsFromTestCase(Foo), empty_suite)
# "Return a suite of all tests cases contained in the TestCase-derived
# class testCaseClass"
#
# What happens if loadTestsFromTestCase() is given an object
# that isn't a subclass of TestCase? Specifically, what happens
# if testCaseClass is a subclass of TestSuite?
#
# This is checked for specifically in the code, so we better add a
# test for it.
def test_loadTestsFromTestCase__TestSuite_subclass(self):
class NotATestCase(unittest.TestSuite):
pass
loader = unittest.TestLoader()
try:
loader.loadTestsFromTestCase(NotATestCase)
except TypeError:
pass
else:
self.fail('Should raise TypeError')
# "Return a suite of all tests cases contained in the TestCase-derived
# class testCaseClass"
#
# Make sure loadTestsFromTestCase() picks up the default test method
# name (as specified by TestCase), even though the method name does
# not match the default TestLoader.testMethodPrefix string
def test_loadTestsFromTestCase__default_method_name(self):
class Foo(unittest.TestCase):
def runTest(self):
pass
loader = unittest.TestLoader()
# This has to be false for the test to succeed
self.assertFalse('runTest'.startswith(loader.testMethodPrefix))
suite = loader.loadTestsFromTestCase(Foo)
self.assertIsInstance(suite, loader.suiteClass)
self.assertEqual(list(suite), [Foo('runTest')])
################################################################
### /Tests for TestLoader.loadTestsFromTestCase
### Tests for TestLoader.loadTestsFromModule
################################################################
# "This method searches `module` for classes derived from TestCase"
    def test_loadTestsFromModule__TestCase_subclass(self):
        """loadTestsFromModule picks up TestCase subclasses bound in a module."""
        m = types.ModuleType('m')
        class MyTestCase(unittest.TestCase):
            def test(self):
                pass
        m.testcase_1 = MyTestCase
        loader = unittest.TestLoader()
        suite = loader.loadTestsFromModule(m)
        self.assertIsInstance(suite, loader.suiteClass)
        expected = [loader.suiteClass([MyTestCase('test')])]
        self.assertEqual(list(suite), expected)
# "This method searches `module` for classes derived from TestCase"
#
# What happens if no tests are found (no TestCase instances)?
    def test_loadTestsFromModule__no_TestCase_instances(self):
        """A module with no TestCase subclasses loads as an empty suite."""
        m = types.ModuleType('m')
        loader = unittest.TestLoader()
        suite = loader.loadTestsFromModule(m)
        self.assertIsInstance(suite, loader.suiteClass)
        self.assertEqual(list(suite), [])
# "This method searches `module` for classes derived from TestCase"
#
# What happens if no tests are found (TestCases instances, but no tests)?
    def test_loadTestsFromModule__no_TestCase_tests(self):
        """A test-less TestCase subclass contributes one empty sub-suite."""
        m = types.ModuleType('m')
        class MyTestCase(unittest.TestCase):
            pass
        m.testcase_1 = MyTestCase
        loader = unittest.TestLoader()
        suite = loader.loadTestsFromModule(m)
        self.assertIsInstance(suite, loader.suiteClass)
        self.assertEqual(list(suite), [loader.suiteClass()])
# "This method searches `module` for classes derived from TestCase"s
#
# What happens if loadTestsFromModule() is given something other
# than a module?
#
# XXX Currently, it succeeds anyway. This flexibility
# should either be documented or loadTestsFromModule() should
# raise a TypeError
#
# XXX Certain people are using this behaviour. We'll add a test for it
def test_loadTestsFromModule__not_a_module(self):
class MyTestCase(unittest.TestCase):
def test(self):
pass
class NotAModule(object):
test_2 = MyTestCase
loader = unittest.TestLoader()
suite = loader.loadTestsFromModule(NotAModule)
reference = [unittest.TestSuite([MyTestCase('test')])]
self.assertEqual(list(suite), reference)
# Check that loadTestsFromModule honors (or not) a module
# with a load_tests function.
    def test_loadTestsFromModule__load_tests(self):
        """loadTestsFromModule delegates to a module-level load_tests hook."""
        m = types.ModuleType('m')
        class MyTestCase(unittest.TestCase):
            def test(self):
                pass
        m.testcase_1 = MyTestCase
        load_tests_args = []
        def load_tests(loader, tests, pattern):
            self.assertIsInstance(tests, unittest.TestSuite)
            load_tests_args.extend((loader, tests, pattern))
            return tests
        m.load_tests = load_tests
        loader = unittest.TestLoader()
        suite = loader.loadTestsFromModule(m)
        self.assertIsInstance(suite, unittest.TestSuite)
        self.assertEqual(load_tests_args, [loader, suite, None])
        load_tests_args = []
        # NOTE(review): the use_load_tests keyword was deprecated in Python 3.5
        # and later removed -- confirm the Python version this suite targets.
        suite = loader.loadTestsFromModule(m, use_load_tests=False)
        self.assertEqual(load_tests_args, [])
    def test_loadTestsFromModule__faulty_load_tests(self):
        """A raising load_tests hook is converted into a synthetic error test."""
        m = types.ModuleType('m')
        def load_tests(loader, tests, pattern):
            raise TypeError('some failure')
        m.load_tests = load_tests
        loader = unittest.TestLoader()
        suite = loader.loadTestsFromModule(m)
        self.assertIsInstance(suite, unittest.TestSuite)
        self.assertEqual(suite.countTestCases(), 1)
        # The synthetic error test's method is named after the module ('m');
        # running it re-raises the original load_tests failure.
        test = list(suite)[0]
        self.assertRaisesRegex(TypeError, "some failure", test.m)
################################################################
### /Tests for TestLoader.loadTestsFromModule()
### Tests for TestLoader.loadTestsFromName()
################################################################
# "The specifier name is a ``dotted name'' that may resolve either to
# a module, a test case class, a TestSuite instance, a test method
# within a test case class, or a callable object which returns a
# TestCase or TestSuite instance."
#
# Is ValueError raised in response to an empty name?
    def test_loadTestsFromName__empty_name(self):
        """An empty name is rejected with ValueError('Empty module name')."""
        loader = unittest.TestLoader()
        try:
            loader.loadTestsFromName('')
        except ValueError as e:
            self.assertEqual(str(e), "Empty module name")
        else:
            self.fail("TestLoader.loadTestsFromName failed to raise ValueError")
# "The specifier name is a ``dotted name'' that may resolve either to
# a module, a test case class, a TestSuite instance, a test method
# within a test case class, or a callable object which returns a
# TestCase or TestSuite instance."
#
# What happens when the name contains invalid characters?
    def test_loadTestsFromName__malformed_name(self):
        """A syntactically impossible dotted name raises ValueError/ImportError."""
        # NOTE(review): since Python 3.5 the loader returns a synthetic failing
        # test for import errors instead of raising -- confirm target version.
        loader = unittest.TestLoader()
        # XXX Should this raise ValueError or ImportError?
        try:
            loader.loadTestsFromName('abc () //')
        except ValueError:
            pass
        except ImportError:
            pass
        else:
            self.fail("TestLoader.loadTestsFromName failed to raise ValueError")
# "The specifier name is a ``dotted name'' that may resolve ... to a
# module"
#
# What happens when a module by that name can't be found?
    def test_loadTestsFromName__unknown_module_name(self):
        """A nonexistent module name surfaces as ImportError."""
        # NOTE(review): since Python 3.5 the loader returns a synthetic failing
        # test instead of raising ImportError -- confirm the target version.
        loader = unittest.TestLoader()
        try:
            loader.loadTestsFromName('sdasfasfasdf')
        except ImportError as e:
            self.assertEqual(str(e), "No module named 'sdasfasfasdf'")
        else:
            self.fail("TestLoader.loadTestsFromName failed to raise ImportError")
# "The specifier name is a ``dotted name'' that may resolve either to
# a module, a test case class, a TestSuite instance, a test method
# within a test case class, or a callable object which returns a
# TestCase or TestSuite instance."
#
# What happens when the module is found, but the attribute can't?
    def test_loadTestsFromName__unknown_attr_name(self):
        """A missing attribute on an importable module raises AttributeError."""
        # NOTE(review): both the raise-vs-error-test behaviour and the exact
        # message format differ on modern CPython -- confirm target version.
        loader = unittest.TestLoader()
        try:
            loader.loadTestsFromName('unittest.sdasfasfasdf')
        except AttributeError as e:
            self.assertEqual(str(e), "'module' object has no attribute 'sdasfasfasdf'")
        else:
            self.fail("TestLoader.loadTestsFromName failed to raise AttributeError")
# "The specifier name is a ``dotted name'' that may resolve either to
# a module, a test case class, a TestSuite instance, a test method
# within a test case class, or a callable object which returns a
# TestCase or TestSuite instance."
#
# What happens when we provide the module, but the attribute can't be
# found?
    def test_loadTestsFromName__relative_unknown_name(self):
        """A missing attribute relative to a given module raises AttributeError."""
        # NOTE(review): message format and raise behaviour are version-dependent.
        loader = unittest.TestLoader()
        try:
            loader.loadTestsFromName('sdasfasfasdf', unittest)
        except AttributeError as e:
            self.assertEqual(str(e), "'module' object has no attribute 'sdasfasfasdf'")
        else:
            self.fail("TestLoader.loadTestsFromName failed to raise AttributeError")
# "The specifier name is a ``dotted name'' that may resolve either to
# a module, a test case class, a TestSuite instance, a test method
# within a test case class, or a callable object which returns a
# TestCase or TestSuite instance."
# ...
# "The method optionally resolves name relative to the given module"
#
# Does loadTestsFromName raise ValueError when passed an empty
# name relative to a provided module?
#
# XXX Should probably raise a ValueError instead of an AttributeError
    def test_loadTestsFromName__relative_empty_name(self):
        """An empty relative name currently raises AttributeError."""
        loader = unittest.TestLoader()
        try:
            loader.loadTestsFromName('', unittest)
        except AttributeError as e:
            pass
        else:
            self.fail("Failed to raise AttributeError")
# "The specifier name is a ``dotted name'' that may resolve either to
# a module, a test case class, a TestSuite instance, a test method
# within a test case class, or a callable object which returns a
# TestCase or TestSuite instance."
# ...
# "The method optionally resolves name relative to the given module"
#
# What happens when an impossible name is given, relative to the provided
# `module`?
    def test_loadTestsFromName__relative_malformed_name(self):
        """An impossible attribute name raises ValueError or AttributeError."""
        loader = unittest.TestLoader()
        # XXX Should this raise AttributeError or ValueError?
        try:
            loader.loadTestsFromName('abc () //', unittest)
        except ValueError:
            pass
        except AttributeError:
            pass
        else:
            self.fail("TestLoader.loadTestsFromName failed to raise ValueError")
# "The method optionally resolves name relative to the given module"
#
# Does loadTestsFromName raise TypeError when the `module` argument
# isn't a module object?
#
# XXX Accepts the not-a-module object, ignorning the object's type
# This should raise an exception or the method name should be changed
#
# XXX Some people are relying on this, so keep it for now
    def test_loadTestsFromName__relative_not_a_module(self):
        """The 'module' argument may be any object with the right attributes."""
        class MyTestCase(unittest.TestCase):
            def test(self):
                pass
        class NotAModule(object):
            test_2 = MyTestCase
        loader = unittest.TestLoader()
        suite = loader.loadTestsFromName('test_2', NotAModule)
        reference = [MyTestCase('test')]
        self.assertEqual(list(suite), reference)
# "The specifier name is a ``dotted name'' that may resolve either to
# a module, a test case class, a TestSuite instance, a test method
# within a test case class, or a callable object which returns a
# TestCase or TestSuite instance."
#
# Does it raise an exception if the name resolves to an invalid
# object?
def test_loadTestsFromName__relative_bad_object(self):
m = types.ModuleType('m')
m.testcase_1 = object()
loader = unittest.TestLoader()
try:
loader.loadTestsFromName('testcase_1', m)
except TypeError:
pass
else:
self.fail("Should have raised TypeError")
# "The specifier name is a ``dotted name'' that may
# resolve either to ... a test case class"
    def test_loadTestsFromName__relative_TestCase_subclass(self):
        """A relative name resolving to a TestCase subclass loads its tests."""
        m = types.ModuleType('m')
        class MyTestCase(unittest.TestCase):
            def test(self):
                pass
        m.testcase_1 = MyTestCase
        loader = unittest.TestLoader()
        suite = loader.loadTestsFromName('testcase_1', m)
        self.assertIsInstance(suite, loader.suiteClass)
        self.assertEqual(list(suite), [MyTestCase('test')])
# "The specifier name is a ``dotted name'' that may resolve either to
# a module, a test case class, a TestSuite instance, a test method
# within a test case class, or a callable object which returns a
# TestCase or TestSuite instance."
    def test_loadTestsFromName__relative_TestSuite(self):
        """A relative name may resolve to a pre-built TestSuite instance."""
        m = types.ModuleType('m')
        class MyTestCase(unittest.TestCase):
            def test(self):
                pass
        m.testsuite = unittest.TestSuite([MyTestCase('test')])
        loader = unittest.TestLoader()
        suite = loader.loadTestsFromName('testsuite', m)
        self.assertIsInstance(suite, loader.suiteClass)
        self.assertEqual(list(suite), [MyTestCase('test')])
# "The specifier name is a ``dotted name'' that may resolve ... to
# ... a test method within a test case class"
def test_loadTestsFromName__relative_testmethod(self):
m = types.ModuleType('m')
class MyTestCase(unittest.TestCase):
def test(self):
pass
m.testcase_1 = MyTestCase
loader = unittest.TestLoader()
suite = loader.loadTestsFromName('testcase_1.test', m)
self.assertIsInstance(suite, loader.suiteClass)
self.assertEqual(list(suite), [MyTestCase('test')])
# "The specifier name is a ``dotted name'' that may resolve either to
# a module, a test case class, a TestSuite instance, a test method
# within a test case class, or a callable object which returns a
# TestCase or TestSuite instance."
#
# Does loadTestsFromName() raise the proper exception when trying to
# resolve "a test method within a test case class" that doesn't exist
# for the given name (relative to a provided module)?
    def test_loadTestsFromName__relative_invalid_testmethod(self):
        """Asking for a nonexistent method on a TestCase raises AttributeError."""
        # NOTE(review): modern loaders return a synthetic failing test here
        # instead of raising -- confirm the Python version this suite targets.
        m = types.ModuleType('m')
        class MyTestCase(unittest.TestCase):
            def test(self):
                pass
        m.testcase_1 = MyTestCase
        loader = unittest.TestLoader()
        try:
            loader.loadTestsFromName('testcase_1.testfoo', m)
        except AttributeError as e:
            self.assertEqual(str(e), "type object 'MyTestCase' has no attribute 'testfoo'")
        else:
            self.fail("Failed to raise AttributeError")
# "The specifier name is a ``dotted name'' that may resolve ... to
# ... a callable object which returns a ... TestSuite instance"
    def test_loadTestsFromName__callable__TestSuite(self):
        """A name may resolve to a callable returning a TestSuite."""
        m = types.ModuleType('m')
        testcase_1 = unittest.FunctionTestCase(lambda: None)
        testcase_2 = unittest.FunctionTestCase(lambda: None)
        def return_TestSuite():
            return unittest.TestSuite([testcase_1, testcase_2])
        m.return_TestSuite = return_TestSuite
        loader = unittest.TestLoader()
        suite = loader.loadTestsFromName('return_TestSuite', m)
        self.assertIsInstance(suite, loader.suiteClass)
        self.assertEqual(list(suite), [testcase_1, testcase_2])
# "The specifier name is a ``dotted name'' that may resolve ... to
# ... a callable object which returns a TestCase ... instance"
    def test_loadTestsFromName__callable__TestCase_instance(self):
        """A name may resolve to a callable returning a single TestCase."""
        m = types.ModuleType('m')
        testcase_1 = unittest.FunctionTestCase(lambda: None)
        def return_TestCase():
            return testcase_1
        m.return_TestCase = return_TestCase
        loader = unittest.TestLoader()
        suite = loader.loadTestsFromName('return_TestCase', m)
        self.assertIsInstance(suite, loader.suiteClass)
        self.assertEqual(list(suite), [testcase_1])
# "The specifier name is a ``dotted name'' that may resolve ... to
# ... a callable object which returns a TestCase ... instance"
#*****************************************************************
#Override the suiteClass attribute to ensure that the suiteClass
#attribute is used
    def test_loadTestsFromName__callable__TestCase_instance_ProperSuiteClass(self):
        """The result of a callable is wrapped in the loader's suiteClass."""
        class SubTestSuite(unittest.TestSuite):
            pass
        m = types.ModuleType('m')
        testcase_1 = unittest.FunctionTestCase(lambda: None)
        def return_TestCase():
            return testcase_1
        m.return_TestCase = return_TestCase
        loader = unittest.TestLoader()
        loader.suiteClass = SubTestSuite
        suite = loader.loadTestsFromName('return_TestCase', m)
        self.assertIsInstance(suite, loader.suiteClass)
        self.assertEqual(list(suite), [testcase_1])
# "The specifier name is a ``dotted name'' that may resolve ... to
# ... a test method within a test case class"
#*****************************************************************
#Override the suiteClass attribute to ensure that the suiteClass
#attribute is used
    def test_loadTestsFromName__relative_testmethod_ProperSuiteClass(self):
        """A single-method load also honours a custom suiteClass."""
        class SubTestSuite(unittest.TestSuite):
            pass
        m = types.ModuleType('m')
        class MyTestCase(unittest.TestCase):
            def test(self):
                pass
        m.testcase_1 = MyTestCase
        loader = unittest.TestLoader()
        loader.suiteClass=SubTestSuite
        suite = loader.loadTestsFromName('testcase_1.test', m)
        self.assertIsInstance(suite, loader.suiteClass)
        self.assertEqual(list(suite), [MyTestCase('test')])
# "The specifier name is a ``dotted name'' that may resolve ... to
# ... a callable object which returns a TestCase or TestSuite instance"
#
# What happens if the callable returns something else?
def test_loadTestsFromName__callable__wrong_type(self):
m = types.ModuleType('m')
def return_wrong():
return 6
m.return_wrong = return_wrong
loader = unittest.TestLoader()
try:
suite = loader.loadTestsFromName('return_wrong', m)
except TypeError:
pass
else:
self.fail("TestLoader.loadTestsFromName failed to raise TypeError")
# "The specifier can refer to modules and packages which have not been
# imported; they will be imported as a side-effect"
    def test_loadTestsFromName__module_not_loaded(self):
        """Loading by name imports the module as a side effect."""
        # We're going to try to load this module as a side-effect, so it
        # better not be loaded before we try.
        #
        # NOTE(review): the unittest.test package was removed from recent
        # CPython releases -- this fixture module may no longer exist.
        module_name = 'unittest.test.dummy'
        sys.modules.pop(module_name, None)
        loader = unittest.TestLoader()
        try:
            suite = loader.loadTestsFromName(module_name)
            self.assertIsInstance(suite, loader.suiteClass)
            self.assertEqual(list(suite), [])
            # module should now be loaded, thanks to loadTestsFromName()
            self.assertIn(module_name, sys.modules)
        finally:
            if module_name in sys.modules:
                del sys.modules[module_name]
################################################################
    ### /Tests for TestLoader.loadTestsFromName()
### Tests for TestLoader.loadTestsFromNames()
################################################################
# "Similar to loadTestsFromName(), but takes a sequence of names rather
# than a single name."
#
# What happens if that sequence of names is empty?
def test_loadTestsFromNames__empty_name_list(self):
loader = unittest.TestLoader()
suite = loader.loadTestsFromNames([])
self.assertIsInstance(suite, loader.suiteClass)
self.assertEqual(list(suite), [])
# "Similar to loadTestsFromName(), but takes a sequence of names rather
# than a single name."
# ...
# "The method optionally resolves name relative to the given module"
#
# What happens if that sequence of names is empty?
#
# XXX Should this raise a ValueError or just return an empty TestSuite?
    def test_loadTestsFromNames__relative_empty_name_list(self):
        """An empty name list with a relative module still yields an empty suite."""
        loader = unittest.TestLoader()
        suite = loader.loadTestsFromNames([], unittest)
        self.assertIsInstance(suite, loader.suiteClass)
        self.assertEqual(list(suite), [])
# "The specifier name is a ``dotted name'' that may resolve either to
# a module, a test case class, a TestSuite instance, a test method
# within a test case class, or a callable object which returns a
# TestCase or TestSuite instance."
#
# Is ValueError raised in response to an empty name?
    def test_loadTestsFromNames__empty_name(self):
        """An empty name in the list raises ValueError('Empty module name')."""
        loader = unittest.TestLoader()
        try:
            loader.loadTestsFromNames([''])
        except ValueError as e:
            self.assertEqual(str(e), "Empty module name")
        else:
            self.fail("TestLoader.loadTestsFromNames failed to raise ValueError")
# "The specifier name is a ``dotted name'' that may resolve either to
# a module, a test case class, a TestSuite instance, a test method
# within a test case class, or a callable object which returns a
# TestCase or TestSuite instance."
#
# What happens when presented with an impossible module name?
    def test_loadTestsFromNames__malformed_name(self):
        """An impossible module name raises ValueError or ImportError."""
        # NOTE(review): modern loaders convert import failures into synthetic
        # failing tests instead of raising -- confirm the target version.
        loader = unittest.TestLoader()
        # XXX Should this raise ValueError or ImportError?
        try:
            loader.loadTestsFromNames(['abc () //'])
        except ValueError:
            pass
        except ImportError:
            pass
        else:
            self.fail("TestLoader.loadTestsFromNames failed to raise ValueError")
# "The specifier name is a ``dotted name'' that may resolve either to
# a module, a test case class, a TestSuite instance, a test method
# within a test case class, or a callable object which returns a
# TestCase or TestSuite instance."
#
# What happens when no module can be found for the given name?
    def test_loadTestsFromNames__unknown_module_name(self):
        """A nonexistent module name surfaces as ImportError."""
        # NOTE(review): modern loaders return a synthetic failing test instead
        # of raising ImportError -- confirm the target version.
        loader = unittest.TestLoader()
        try:
            loader.loadTestsFromNames(['sdasfasfasdf'])
        except ImportError as e:
            self.assertEqual(str(e), "No module named 'sdasfasfasdf'")
        else:
            self.fail("TestLoader.loadTestsFromNames failed to raise ImportError")
# "The specifier name is a ``dotted name'' that may resolve either to
# a module, a test case class, a TestSuite instance, a test method
# within a test case class, or a callable object which returns a
# TestCase or TestSuite instance."
#
# What happens when the module can be found, but not the attribute?
    def test_loadTestsFromNames__unknown_attr_name(self):
        """A missing attribute on an importable module raises AttributeError."""
        # NOTE(review): message format and raise behaviour are version-dependent.
        loader = unittest.TestLoader()
        try:
            loader.loadTestsFromNames(['unittest.sdasfasfasdf', 'unittest'])
        except AttributeError as e:
            self.assertEqual(str(e), "'module' object has no attribute 'sdasfasfasdf'")
        else:
            self.fail("TestLoader.loadTestsFromNames failed to raise AttributeError")
# "The specifier name is a ``dotted name'' that may resolve either to
# a module, a test case class, a TestSuite instance, a test method
# within a test case class, or a callable object which returns a
# TestCase or TestSuite instance."
# ...
# "The method optionally resolves name relative to the given module"
#
# What happens when given an unknown attribute on a specified `module`
# argument?
    def test_loadTestsFromNames__unknown_name_relative_1(self):
        """A missing attribute relative to a module raises AttributeError."""
        # NOTE(review): message format and raise behaviour are version-dependent.
        loader = unittest.TestLoader()
        try:
            loader.loadTestsFromNames(['sdasfasfasdf'], unittest)
        except AttributeError as e:
            self.assertEqual(str(e), "'module' object has no attribute 'sdasfasfasdf'")
        else:
            self.fail("TestLoader.loadTestsFromName failed to raise AttributeError")
# "The specifier name is a ``dotted name'' that may resolve either to
# a module, a test case class, a TestSuite instance, a test method
# within a test case class, or a callable object which returns a
# TestCase or TestSuite instance."
# ...
# "The method optionally resolves name relative to the given module"
#
# Do unknown attributes (relative to a provided module) still raise an
# exception even in the presence of valid attribute names?
    def test_loadTestsFromNames__unknown_name_relative_2(self):
        """One bad name raises even when other names in the list are valid."""
        loader = unittest.TestLoader()
        try:
            loader.loadTestsFromNames(['TestCase', 'sdasfasfasdf'], unittest)
        except AttributeError as e:
            self.assertEqual(str(e), "'module' object has no attribute 'sdasfasfasdf'")
        else:
            self.fail("TestLoader.loadTestsFromName failed to raise AttributeError")
# "The specifier name is a ``dotted name'' that may resolve either to
# a module, a test case class, a TestSuite instance, a test method
# within a test case class, or a callable object which returns a
# TestCase or TestSuite instance."
# ...
# "The method optionally resolves name relative to the given module"
#
# What happens when faced with the empty string?
#
# XXX This currently raises AttributeError, though ValueError is probably
# more appropriate
    def test_loadTestsFromNames__relative_empty_name(self):
        """An empty relative name currently raises AttributeError."""
        # NOTE(review): the fail() message mentions ValueError (the exception
        # the surrounding XXX considers more appropriate), while the accepted
        # exception here is AttributeError.
        loader = unittest.TestLoader()
        try:
            loader.loadTestsFromNames([''], unittest)
        except AttributeError:
            pass
        else:
            self.fail("Failed to raise ValueError")
# "The specifier name is a ``dotted name'' that may resolve either to
# a module, a test case class, a TestSuite instance, a test method
# within a test case class, or a callable object which returns a
# TestCase or TestSuite instance."
# ...
# "The method optionally resolves name relative to the given module"
#
# What happens when presented with an impossible attribute name?
    def test_loadTestsFromNames__relative_malformed_name(self):
        """An impossible attribute name raises AttributeError or ValueError."""
        loader = unittest.TestLoader()
        # XXX Should this raise AttributeError or ValueError?
        try:
            loader.loadTestsFromNames(['abc () //'], unittest)
        except AttributeError:
            pass
        except ValueError:
            pass
        else:
            self.fail("TestLoader.loadTestsFromNames failed to raise ValueError")
# "The method optionally resolves name relative to the given module"
#
# Does loadTestsFromNames() make sure the provided `module` is in fact
# a module?
#
# XXX This validation is currently not done. This flexibility should
# either be documented or a TypeError should be raised.
    def test_loadTestsFromNames__relative_not_a_module(self):
        """The 'module' argument may be any object with the right attributes."""
        class MyTestCase(unittest.TestCase):
            def test(self):
                pass
        class NotAModule(object):
            test_2 = MyTestCase
        loader = unittest.TestLoader()
        suite = loader.loadTestsFromNames(['test_2'], NotAModule)
        reference = [unittest.TestSuite([MyTestCase('test')])]
        self.assertEqual(list(suite), reference)
# "The specifier name is a ``dotted name'' that may resolve either to
# a module, a test case class, a TestSuite instance, a test method
# within a test case class, or a callable object which returns a
# TestCase or TestSuite instance."
#
# Does it raise an exception if the name resolves to an invalid
# object?
def test_loadTestsFromNames__relative_bad_object(self):
m = types.ModuleType('m')
m.testcase_1 = object()
loader = unittest.TestLoader()
try:
loader.loadTestsFromNames(['testcase_1'], m)
except TypeError:
pass
else:
self.fail("Should have raised TypeError")
# "The specifier name is a ``dotted name'' that may resolve ... to
# ... a test case class"
    def test_loadTestsFromNames__relative_TestCase_subclass(self):
        """Each name resolving to a TestCase subclass becomes a sub-suite."""
        m = types.ModuleType('m')
        class MyTestCase(unittest.TestCase):
            def test(self):
                pass
        m.testcase_1 = MyTestCase
        loader = unittest.TestLoader()
        suite = loader.loadTestsFromNames(['testcase_1'], m)
        self.assertIsInstance(suite, loader.suiteClass)
        expected = loader.suiteClass([MyTestCase('test')])
        self.assertEqual(list(suite), [expected])
# "The specifier name is a ``dotted name'' that may resolve ... to
# ... a TestSuite instance"
    def test_loadTestsFromNames__relative_TestSuite(self):
        """A name may resolve to a pre-built TestSuite instance."""
        m = types.ModuleType('m')
        class MyTestCase(unittest.TestCase):
            def test(self):
                pass
        m.testsuite = unittest.TestSuite([MyTestCase('test')])
        loader = unittest.TestLoader()
        suite = loader.loadTestsFromNames(['testsuite'], m)
        self.assertIsInstance(suite, loader.suiteClass)
        self.assertEqual(list(suite), [m.testsuite])
# "The specifier name is a ``dotted name'' that may resolve ... to ... a
# test method within a test case class"
    def test_loadTestsFromNames__relative_testmethod(self):
        """A dotted name may address a single test method on a class."""
        m = types.ModuleType('m')
        class MyTestCase(unittest.TestCase):
            def test(self):
                pass
        m.testcase_1 = MyTestCase
        loader = unittest.TestLoader()
        suite = loader.loadTestsFromNames(['testcase_1.test'], m)
        self.assertIsInstance(suite, loader.suiteClass)
        ref_suite = unittest.TestSuite([MyTestCase('test')])
        self.assertEqual(list(suite), [ref_suite])
# "The specifier name is a ``dotted name'' that may resolve ... to ... a
# test method within a test case class"
#
# Does the method gracefully handle names that initially look like they
# resolve to "a test method within a test case class" but don't?
    def test_loadTestsFromNames__relative_invalid_testmethod(self):
        """A nonexistent method name on a TestCase raises AttributeError."""
        # NOTE(review): modern loaders return a synthetic failing test here
        # instead of raising -- confirm the Python version this suite targets.
        m = types.ModuleType('m')
        class MyTestCase(unittest.TestCase):
            def test(self):
                pass
        m.testcase_1 = MyTestCase
        loader = unittest.TestLoader()
        try:
            loader.loadTestsFromNames(['testcase_1.testfoo'], m)
        except AttributeError as e:
            self.assertEqual(str(e), "type object 'MyTestCase' has no attribute 'testfoo'")
        else:
            self.fail("Failed to raise AttributeError")
# "The specifier name is a ``dotted name'' that may resolve ... to
# ... a callable object which returns a ... TestSuite instance"
    def test_loadTestsFromNames__callable__TestSuite(self):
        """A name may resolve to a callable returning a TestSuite."""
        m = types.ModuleType('m')
        testcase_1 = unittest.FunctionTestCase(lambda: None)
        testcase_2 = unittest.FunctionTestCase(lambda: None)
        def return_TestSuite():
            return unittest.TestSuite([testcase_1, testcase_2])
        m.return_TestSuite = return_TestSuite
        loader = unittest.TestLoader()
        suite = loader.loadTestsFromNames(['return_TestSuite'], m)
        self.assertIsInstance(suite, loader.suiteClass)
        expected = unittest.TestSuite([testcase_1, testcase_2])
        self.assertEqual(list(suite), [expected])
# "The specifier name is a ``dotted name'' that may resolve ... to
# ... a callable object which returns a TestCase ... instance"
    def test_loadTestsFromNames__callable__TestCase_instance(self):
        """A name may resolve to a callable returning a single TestCase."""
        m = types.ModuleType('m')
        testcase_1 = unittest.FunctionTestCase(lambda: None)
        def return_TestCase():
            return testcase_1
        m.return_TestCase = return_TestCase
        loader = unittest.TestLoader()
        suite = loader.loadTestsFromNames(['return_TestCase'], m)
        self.assertIsInstance(suite, loader.suiteClass)
        ref_suite = unittest.TestSuite([testcase_1])
        self.assertEqual(list(suite), [ref_suite])
# "The specifier name is a ``dotted name'' that may resolve ... to
# ... a callable object which returns a TestCase or TestSuite instance"
#
# Are staticmethods handled correctly?
    def test_loadTestsFromNames__callable__call_staticmethod(self):
        """A staticmethod on a TestCase class is treated as a plain callable."""
        m = types.ModuleType('m')
        class Test1(unittest.TestCase):
            def test(self):
                pass
        testcase_1 = Test1('test')
        class Foo(unittest.TestCase):
            @staticmethod
            def foo():
                return testcase_1
        m.Foo = Foo
        loader = unittest.TestLoader()
        suite = loader.loadTestsFromNames(['Foo.foo'], m)
        self.assertIsInstance(suite, loader.suiteClass)
        ref_suite = unittest.TestSuite([testcase_1])
        self.assertEqual(list(suite), [ref_suite])
# "The specifier name is a ``dotted name'' that may resolve ... to
# ... a callable object which returns a TestCase or TestSuite instance"
#
# What happens when the callable returns something else?
def test_loadTestsFromNames__callable__wrong_type(self):
m = types.ModuleType('m')
def return_wrong():
return 6
m.return_wrong = return_wrong
loader = unittest.TestLoader()
try:
suite = loader.loadTestsFromNames(['return_wrong'], m)
except TypeError:
pass
else:
self.fail("TestLoader.loadTestsFromNames failed to raise TypeError")
# "The specifier can refer to modules and packages which have not been
# imported; they will be imported as a side-effect"
    def test_loadTestsFromNames__module_not_loaded(self):
        # We're going to try to load this module as a side-effect, so it
        # better not be loaded before we try.
        #
        module_name = 'unittest.test.dummy'
        sys.modules.pop(module_name, None)
        loader = unittest.TestLoader()
        try:
            suite = loader.loadTestsFromNames([module_name])
            self.assertIsInstance(suite, loader.suiteClass)
            # the dummy module defines no tests, hence the empty inner suite
            self.assertEqual(list(suite), [unittest.TestSuite()])
            # module should now be loaded, thanks to loadTestsFromName()
            self.assertIn(module_name, sys.modules)
        finally:
            # always undo the import side-effect so other tests start clean
            if module_name in sys.modules:
                del sys.modules[module_name]
################################################################
### /Tests for TestLoader.loadTestsFromNames()
### Tests for TestLoader.getTestCaseNames()
################################################################
# "Return a sorted sequence of method names found within testCaseClass"
#
# Test.foobar is defined to make sure getTestCaseNames() respects
# loader.testMethodPrefix
    def test_getTestCaseNames(self):
        # names must come back sorted and filtered by the 'test' prefix
        class Test(unittest.TestCase):
            def test_1(self): pass
            def test_2(self): pass
            def foobar(self): pass
        loader = unittest.TestLoader()
        self.assertEqual(loader.getTestCaseNames(Test), ['test_1', 'test_2'])
    # "Return a sorted sequence of method names found within testCaseClass"
    #
    # Does getTestCaseNames() behave appropriately if no tests are found?
    def test_getTestCaseNames__no_tests(self):
        class Test(unittest.TestCase):
            def foobar(self): pass
        loader = unittest.TestLoader()
        # no method matches the prefix, so the result is an empty list
        self.assertEqual(loader.getTestCaseNames(Test), [])
    # "Return a sorted sequence of method names found within testCaseClass"
    #
    # Are not-TestCases handled gracefully?
    #
    # XXX This should raise a TypeError, not return a list
    #
    # XXX It's too late in the 2.5 release cycle to fix this, but it should
    # probably be revisited for 2.6
    def test_getTestCaseNames__not_a_TestCase(self):
        class BadCase(int):
            def test_foo(self):
                pass
        loader = unittest.TestLoader()
        names = loader.getTestCaseNames(BadCase)
        # documents current (questionable) behaviour: names are returned
        # even though BadCase is not a TestCase subclass
        self.assertEqual(names, ['test_foo'])
    # "Return a sorted sequence of method names found within testCaseClass"
    #
    # Make sure inherited names are handled.
    #
    # TestP.foobar is defined to make sure getTestCaseNames() respects
    # loader.testMethodPrefix
    def test_getTestCaseNames__inheritance(self):
        class TestP(unittest.TestCase):
            def test_1(self): pass
            def test_2(self): pass
            def foobar(self): pass
        class TestC(TestP):
            def test_1(self): pass
            def test_3(self): pass
        loader = unittest.TestLoader()
        # overriding test_1 in TestC must not produce a duplicate entry
        names = ['test_1', 'test_2', 'test_3']
        self.assertEqual(loader.getTestCaseNames(TestC), names)
################################################################
### /Tests for TestLoader.getTestCaseNames()
### Tests for TestLoader.testMethodPrefix
################################################################
# "String giving the prefix of method names which will be interpreted as
# test methods"
#
# Implicit in the documentation is that testMethodPrefix is respected by
# all loadTestsFrom* methods.
    def test_testMethodPrefix__loadTestsFromTestCase(self):
        class Foo(unittest.TestCase):
            def test_1(self): pass
            def test_2(self): pass
            def foo_bar(self): pass
        # expected suites for prefix 'foo' and prefix 'test' respectively
        tests_1 = unittest.TestSuite([Foo('foo_bar')])
        tests_2 = unittest.TestSuite([Foo('test_1'), Foo('test_2')])
        loader = unittest.TestLoader()
        loader.testMethodPrefix = 'foo'
        self.assertEqual(loader.loadTestsFromTestCase(Foo), tests_1)
        loader.testMethodPrefix = 'test'
        self.assertEqual(loader.loadTestsFromTestCase(Foo), tests_2)
    # "String giving the prefix of method names which will be interpreted as
    # test methods"
    #
    # Implicit in the documentation is that testMethodPrefix is respected by
    # all loadTestsFrom* methods.
    def test_testMethodPrefix__loadTestsFromModule(self):
        m = types.ModuleType('m')
        class Foo(unittest.TestCase):
            def test_1(self): pass
            def test_2(self): pass
            def foo_bar(self): pass
        m.Foo = Foo
        tests_1 = [unittest.TestSuite([Foo('foo_bar')])]
        tests_2 = [unittest.TestSuite([Foo('test_1'), Foo('test_2')])]
        loader = unittest.TestLoader()
        loader.testMethodPrefix = 'foo'
        self.assertEqual(list(loader.loadTestsFromModule(m)), tests_1)
        loader.testMethodPrefix = 'test'
        self.assertEqual(list(loader.loadTestsFromModule(m)), tests_2)
    # "String giving the prefix of method names which will be interpreted as
    # test methods"
    #
    # Implicit in the documentation is that testMethodPrefix is respected by
    # all loadTestsFrom* methods.
    def test_testMethodPrefix__loadTestsFromName(self):
        m = types.ModuleType('m')
        class Foo(unittest.TestCase):
            def test_1(self): pass
            def test_2(self): pass
            def foo_bar(self): pass
        m.Foo = Foo
        tests_1 = unittest.TestSuite([Foo('foo_bar')])
        tests_2 = unittest.TestSuite([Foo('test_1'), Foo('test_2')])
        loader = unittest.TestLoader()
        loader.testMethodPrefix = 'foo'
        self.assertEqual(loader.loadTestsFromName('Foo', m), tests_1)
        loader.testMethodPrefix = 'test'
        self.assertEqual(loader.loadTestsFromName('Foo', m), tests_2)
    # "String giving the prefix of method names which will be interpreted as
    # test methods"
    #
    # Implicit in the documentation is that testMethodPrefix is respected by
    # all loadTestsFrom* methods.
    def test_testMethodPrefix__loadTestsFromNames(self):
        m = types.ModuleType('m')
        class Foo(unittest.TestCase):
            def test_1(self): pass
            def test_2(self): pass
            def foo_bar(self): pass
        m.Foo = Foo
        # loadTestsFromNames() adds one more level of suite nesting
        tests_1 = unittest.TestSuite([unittest.TestSuite([Foo('foo_bar')])])
        tests_2 = unittest.TestSuite([Foo('test_1'), Foo('test_2')])
        tests_2 = unittest.TestSuite([tests_2])
        loader = unittest.TestLoader()
        loader.testMethodPrefix = 'foo'
        self.assertEqual(loader.loadTestsFromNames(['Foo'], m), tests_1)
        loader.testMethodPrefix = 'test'
        self.assertEqual(loader.loadTestsFromNames(['Foo'], m), tests_2)
    # "The default value is 'test'"
    def test_testMethodPrefix__default_value(self):
        loader = unittest.TestLoader()
        self.assertEqual(loader.testMethodPrefix, 'test')
################################################################
### /Tests for TestLoader.testMethodPrefix
### Tests for TestLoader.sortTestMethodsUsing
################################################################
# "Function to be used to compare method names when sorting them in
# getTestCaseNames() and all the loadTestsFromX() methods"
    def test_sortTestMethodsUsing__loadTestsFromTestCase(self):
        # old-style cmp function that inverts the natural ordering
        def reversed_cmp(x, y):
            return -((x > y) - (x < y))
        class Foo(unittest.TestCase):
            def test_1(self): pass
            def test_2(self): pass
        loader = unittest.TestLoader()
        loader.sortTestMethodsUsing = reversed_cmp
        tests = loader.suiteClass([Foo('test_2'), Foo('test_1')])
        self.assertEqual(loader.loadTestsFromTestCase(Foo), tests)
    # "Function to be used to compare method names when sorting them in
    # getTestCaseNames() and all the loadTestsFromX() methods"
    def test_sortTestMethodsUsing__loadTestsFromModule(self):
        def reversed_cmp(x, y):
            return -((x > y) - (x < y))
        m = types.ModuleType('m')
        class Foo(unittest.TestCase):
            def test_1(self): pass
            def test_2(self): pass
        m.Foo = Foo
        loader = unittest.TestLoader()
        loader.sortTestMethodsUsing = reversed_cmp
        tests = [loader.suiteClass([Foo('test_2'), Foo('test_1')])]
        self.assertEqual(list(loader.loadTestsFromModule(m)), tests)
    # "Function to be used to compare method names when sorting them in
    # getTestCaseNames() and all the loadTestsFromX() methods"
    def test_sortTestMethodsUsing__loadTestsFromName(self):
        def reversed_cmp(x, y):
            return -((x > y) - (x < y))
        m = types.ModuleType('m')
        class Foo(unittest.TestCase):
            def test_1(self): pass
            def test_2(self): pass
        m.Foo = Foo
        loader = unittest.TestLoader()
        loader.sortTestMethodsUsing = reversed_cmp
        tests = loader.suiteClass([Foo('test_2'), Foo('test_1')])
        self.assertEqual(loader.loadTestsFromName('Foo', m), tests)
    # "Function to be used to compare method names when sorting them in
    # getTestCaseNames() and all the loadTestsFromX() methods"
    def test_sortTestMethodsUsing__loadTestsFromNames(self):
        def reversed_cmp(x, y):
            return -((x > y) - (x < y))
        m = types.ModuleType('m')
        class Foo(unittest.TestCase):
            def test_1(self): pass
            def test_2(self): pass
        m.Foo = Foo
        loader = unittest.TestLoader()
        loader.sortTestMethodsUsing = reversed_cmp
        tests = [loader.suiteClass([Foo('test_2'), Foo('test_1')])]
        self.assertEqual(list(loader.loadTestsFromNames(['Foo'], m)), tests)
    # "Function to be used to compare method names when sorting them in
    # getTestCaseNames()"
    #
    # Does it actually affect getTestCaseNames()?
    def test_sortTestMethodsUsing__getTestCaseNames(self):
        def reversed_cmp(x, y):
            return -((x > y) - (x < y))
        class Foo(unittest.TestCase):
            def test_1(self): pass
            def test_2(self): pass
        loader = unittest.TestLoader()
        loader.sortTestMethodsUsing = reversed_cmp
        test_names = ['test_2', 'test_1']
        self.assertEqual(loader.getTestCaseNames(Foo), test_names)
    # "The default value is the built-in cmp() function"
    # Since cmp is now defunct, we simply verify that the results
    # occur in the same order as they would with the default sort.
    def test_sortTestMethodsUsing__default_value(self):
        loader = unittest.TestLoader()
        class Foo(unittest.TestCase):
            def test_2(self): pass
            def test_3(self): pass
            def test_1(self): pass
        # definition order differs from sorted order on purpose
        test_names = ['test_2', 'test_3', 'test_1']
        self.assertEqual(loader.getTestCaseNames(Foo), sorted(test_names))
    # "it can be set to None to disable the sort."
    #
    # XXX How is this different from reassigning cmp? Are the tests returned
    # in a random order or something? This behaviour should die
    def test_sortTestMethodsUsing__None(self):
        class Foo(unittest.TestCase):
            def test_1(self): pass
            def test_2(self): pass
        loader = unittest.TestLoader()
        loader.sortTestMethodsUsing = None
        # with sorting disabled, only membership (not order) is guaranteed
        test_names = ['test_2', 'test_1']
        self.assertEqual(set(loader.getTestCaseNames(Foo)), set(test_names))
################################################################
### /Tests for TestLoader.sortTestMethodsUsing
### Tests for TestLoader.suiteClass
################################################################
# "Callable object that constructs a test suite from a list of tests."
    def test_suiteClass__loadTestsFromTestCase(self):
        class Foo(unittest.TestCase):
            def test_1(self): pass
            def test_2(self): pass
            def foo_bar(self): pass
        tests = [Foo('test_1'), Foo('test_2')]
        loader = unittest.TestLoader()
        # use a plain list as the suite factory to observe the raw tests
        loader.suiteClass = list
        self.assertEqual(loader.loadTestsFromTestCase(Foo), tests)
    # It is implicit in the documentation for TestLoader.suiteClass that
    # all TestLoader.loadTestsFrom* methods respect it. Let's make sure
    def test_suiteClass__loadTestsFromModule(self):
        m = types.ModuleType('m')
        class Foo(unittest.TestCase):
            def test_1(self): pass
            def test_2(self): pass
            def foo_bar(self): pass
        m.Foo = Foo
        # nested list mirrors loadTestsFromModule's per-class grouping
        tests = [[Foo('test_1'), Foo('test_2')]]
        loader = unittest.TestLoader()
        loader.suiteClass = list
        self.assertEqual(loader.loadTestsFromModule(m), tests)
    # It is implicit in the documentation for TestLoader.suiteClass that
    # all TestLoader.loadTestsFrom* methods respect it. Let's make sure
    def test_suiteClass__loadTestsFromName(self):
        m = types.ModuleType('m')
        class Foo(unittest.TestCase):
            def test_1(self): pass
            def test_2(self): pass
            def foo_bar(self): pass
        m.Foo = Foo
        tests = [Foo('test_1'), Foo('test_2')]
        loader = unittest.TestLoader()
        loader.suiteClass = list
        self.assertEqual(loader.loadTestsFromName('Foo', m), tests)
    # It is implicit in the documentation for TestLoader.suiteClass that
    # all TestLoader.loadTestsFrom* methods respect it. Let's make sure
    def test_suiteClass__loadTestsFromNames(self):
        m = types.ModuleType('m')
        class Foo(unittest.TestCase):
            def test_1(self): pass
            def test_2(self): pass
            def foo_bar(self): pass
        m.Foo = Foo
        tests = [[Foo('test_1'), Foo('test_2')]]
        loader = unittest.TestLoader()
        loader.suiteClass = list
        self.assertEqual(loader.loadTestsFromNames(['Foo'], m), tests)
    # "The default value is the TestSuite class"
    def test_suiteClass__default_value(self):
        loader = unittest.TestLoader()
        self.assertTrue(loader.suiteClass is unittest.TestSuite)
// SPDX-License-Identifier: GPL-2.0-or-later
/* audit.c -- Auditing support
* Gateway between the kernel (e.g., selinux) and the user-space audit daemon.
* System-call specific features have moved to auditsc.c
*
* Copyright 2003-2007 Red Hat Inc., Durham, North Carolina.
* All Rights Reserved.
*
* Written by Rickard E. (Rik) Faith <faith@redhat.com>
*
* Goals: 1) Integrate fully with Security Modules.
* 2) Minimal run-time overhead:
* a) Minimal when syscall auditing is disabled (audit_enable=0).
* b) Small when syscall auditing is enabled and no audit record
* is generated (defer as much work as possible to record
* generation time):
* i) context is allocated,
* ii) names from getname are stored without a copy, and
* iii) inode information stored from path_lookup.
* 3) Ability to disable syscall auditing at boot time (audit=0).
* 4) Usable by other parts of the kernel (if audit_log* is called,
* then a syscall record will be generated automatically for the
* current syscall).
* 5) Netlink interface to user-space.
* 6) Support low-overhead kernel-based filtering to minimize the
* information that must be passed to user-space.
*
* Audit userspace, documentation, tests, and bug/issue trackers:
* https://github.com/linux-audit
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/file.h>
#include <linux/hex.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/atomic.h>
#include <linux/mm.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/kthread.h>
#include <linux/kernel.h>
#include <linux/syscalls.h>
#include <linux/spinlock.h>
#include <linux/rcupdate.h>
#include <linux/mutex.h>
#include <linux/gfp.h>
#include <linux/pid.h>
#include <linux/audit.h>
#include <net/sock.h>
#include <net/netlink.h>
#include <linux/skbuff.h>
#include <linux/security.h>
#include <linux/lsm_hooks.h>
#include <linux/freezer.h>
#include <linux/pid_namespace.h>
#include <net/netns/generic.h>
#include <net/ip.h>
#include <net/ipv6.h>
#include <linux/sctp.h>
#include "audit.h"
/* No auditing will take place until audit_initialized == AUDIT_INITIALIZED.
 * (Initialization happens after skb_init is called.) */
#define AUDIT_DISABLED -1
#define AUDIT_UNINITIALIZED 0
#define AUDIT_INITIALIZED 1
static int audit_initialized = AUDIT_UNINITIALIZED;
/* global audit state (off/on/locked); exported for in-kernel callers */
u32 audit_enabled = AUDIT_OFF;
/* set when auditing is first enabled; never cleared afterwards */
bool audit_ever_enabled = !!AUDIT_OFF;
EXPORT_SYMBOL_GPL(audit_enabled);
/* Default state when kernel boots without any parameters. */
static u32 audit_default = AUDIT_OFF;
/* If auditing cannot proceed, audit_failure selects what happens. */
static u32 audit_failure = AUDIT_FAIL_PRINTK;
/* private audit network namespace index */
static unsigned int audit_net_id;
/* Number of modules that provide a security context.
   List of lsms that provide a security context */
static u32 audit_subj_secctx_cnt;
static u32 audit_obj_secctx_cnt;
static const struct lsm_id *audit_subj_lsms[MAX_LSM_COUNT];
static const struct lsm_id *audit_obj_lsms[MAX_LSM_COUNT];
/**
 * struct audit_net - audit private network namespace data
 * @sk: communication socket
 */
struct audit_net {
	struct sock *sk;
};
/**
 * struct auditd_connection - kernel/auditd connection state
 * @pid: auditd PID
 * @portid: netlink portid
 * @net: the associated network namespace
 * @rcu: RCU head
 *
 * Description:
 * This struct is RCU protected; you must either hold the RCU lock for reading
 * or the associated spinlock for writing.
 */
struct auditd_connection {
	struct pid *pid;
	u32 portid;
	struct net *net;
	struct rcu_head rcu;
};
/* current auditd connection; readers use RCU, writers take the spinlock */
static struct auditd_connection __rcu *auditd_conn;
static DEFINE_SPINLOCK(auditd_conn_lock);
/* If audit_rate_limit is non-zero, limit the rate of sending audit records
 * to that number per second. This prevents DoS attacks, but results in
 * audit records being dropped. */
static u32 audit_rate_limit;
/* Number of outstanding audit_buffers allowed.
 * When set to zero, this means unlimited. */
static u32 audit_backlog_limit = 64;
#define AUDIT_BACKLOG_WAIT_TIME (60 * HZ)
static u32 audit_backlog_wait_time = AUDIT_BACKLOG_WAIT_TIME;
/* The identity of the user shutting down the audit system. */
static kuid_t audit_sig_uid = INVALID_UID;
static pid_t audit_sig_pid = -1;
static struct lsm_prop audit_sig_lsm;
/* Records can be lost in several ways:
   0) [suppressed in audit_alloc]
   1) out of memory in audit_log_start [kmalloc of struct audit_buffer]
   2) out of memory in audit_log_move [alloc_skb]
   3) suppressed due to audit_rate_limit
   4) suppressed due to audit_backlog_limit
*/
static atomic_t audit_lost = ATOMIC_INIT(0);
/* Monotonically increasing sum of time the kernel has spent
 * waiting while the backlog limit is exceeded.
 */
static atomic_t audit_backlog_wait_time_actual = ATOMIC_INIT(0);
/* Hash for inode-based rules */
struct list_head audit_inode_hash[AUDIT_INODE_BUCKETS];
static struct kmem_cache *audit_buffer_cache;
/* queue msgs to send via kauditd_task */
static struct sk_buff_head audit_queue;
/* queue msgs due to temporary unicast send problems */
static struct sk_buff_head audit_retry_queue;
/* queue msgs waiting for new auditd connection */
static struct sk_buff_head audit_hold_queue;
/* queue servicing thread */
static struct task_struct *kauditd_task;
static DECLARE_WAIT_QUEUE_HEAD(kauditd_wait);
/* waitqueue for callers who are blocked on the audit backlog */
static DECLARE_WAIT_QUEUE_HEAD(audit_backlog_wait);
/* feature negotiation state exchanged with userspace */
static struct audit_features af = {.vers = AUDIT_FEATURE_VERSION,
				   .mask = -1,
				   .features = 0,
				   .lock = 0,};
static char *audit_feature_names[2] = {
	"only_unset_loginuid",
	"loginuid_immutable",
};
/**
 * struct audit_ctl_mutex - serialize requests from userspace
 * @lock: the mutex used for locking
 * @owner: the task which owns the lock
 *
 * Description:
 * This is the lock struct used to ensure we only process userspace requests
 * in an orderly fashion.  We can't simply use a mutex/lock here because we
 * need to track lock ownership so we don't end up blocking the lock owner in
 * audit_log_start() or similar.
 */
static struct audit_ctl_mutex {
	struct mutex lock;
	void *owner;
} audit_cmd_mutex;
/* AUDIT_BUFSIZ is the size of the temporary buffer used for formatting
 * audit records.  Since printk uses a 1024 byte buffer, this buffer
 * should be at least that large. */
#define AUDIT_BUFSIZ 1024
/* The audit_buffer is used when formatting an audit record.  The caller
 * locks briefly to get the record off the freelist or to allocate the
 * buffer, and locks briefly to send the buffer to the netlink layer or
 * to place it on a transmit queue.  Multiple audit_buffers can be in
 * use simultaneously. */
struct audit_buffer {
	struct sk_buff *skb;	/* the skb for audit_log functions */
	struct sk_buff_head skb_list;	/* formatted skbs, ready to send */
	struct audit_context *ctx;	/* NULL or associated context */
	struct audit_stamp stamp;	/* audit stamp for these records */
	gfp_t gfp_mask;		/* allocation flags used for this record */
};
/* parameters needed to deliver a reply skb back to a userspace requester */
struct audit_reply {
	__u32 portid;
	struct net *net;
	struct sk_buff *skb;
};
/**
 * auditd_test_task - Check to see if a given task is an audit daemon
 * @task: the task to check
 *
 * Description:
 * Return 1 if the task is a registered audit daemon, 0 otherwise.
 */
int auditd_test_task(struct task_struct *task)
{
	int is_auditd = 0;
	struct auditd_connection *ac;

	/* compare thread-group PIDs under RCU; the connection may go away */
	rcu_read_lock();
	ac = rcu_dereference(auditd_conn);
	if (ac && ac->pid == task_tgid(task))
		is_auditd = 1;
	rcu_read_unlock();

	return is_auditd;
}
/**
 * audit_ctl_lock - Take the audit control lock
 */
void audit_ctl_lock(void)
{
	mutex_lock(&audit_cmd_mutex.lock);
	/* record the owner so audit_ctl_owner_current() can test it */
	audit_cmd_mutex.owner = current;
}
/**
 * audit_ctl_unlock - Drop the audit control lock
 */
void audit_ctl_unlock(void)
{
	/* clear ownership before releasing so no stale owner is visible */
	audit_cmd_mutex.owner = NULL;
	mutex_unlock(&audit_cmd_mutex.lock);
}
/**
 * audit_ctl_owner_current - Test to see if the current task owns the lock
 *
 * Description:
 * Return true if the current task owns the audit control lock, false if it
 * doesn't own the lock.
 */
static bool audit_ctl_owner_current(void)
{
	return (current == audit_cmd_mutex.owner);
}
/**
 * auditd_pid_vnr - Return the auditd PID relative to the namespace
 *
 * Description:
 * Returns the PID in relation to the namespace, 0 on failure.
 */
static pid_t auditd_pid_vnr(void)
{
	const struct auditd_connection *ac;
	pid_t pid = 0;

	/* translate the registered daemon's PID into the caller's pid ns */
	rcu_read_lock();
	ac = rcu_dereference(auditd_conn);
	if (ac && ac->pid)
		pid = pid_vnr(ac->pid);
	rcu_read_unlock();

	return pid;
}
/**
* audit_cfg_lsm - Identify a security module as providing a secctx.
* @lsmid: LSM identity
* @flags: which contexts are provided
*
* Description:
* Increments the count of the security modules providing a secctx.
* If the LSM id is already in the list leave it alone.
*/
void audit_cfg_lsm(const struct lsm_id *lsmid, int flags)
{
int i;
if (flags & AUDIT_CFG_LSM_SECCTX_SUBJECT) {
for (i = 0 ; i < audit_subj_secctx_cnt; i++)
if (audit_subj_lsms[i] == lsmid)
return;
audit_subj_lsms[audit_subj_secctx_cnt++] = lsmid;
}
if (flags & AUDIT_CFG_LSM_SECCTX_OBJECT) {
for (i = 0 ; i < audit_obj_secctx_cnt; i++)
if (audit_obj_lsms[i] == lsmid)
return;
audit_obj_lsms[audit_obj_secctx_cnt++] = lsmid;
}
}
/**
 * audit_get_sk - Return the audit socket for the given network namespace
 * @net: the destination network namespace
 *
 * Description:
 * Returns the sock pointer if valid, NULL otherwise.  The caller must ensure
 * that a reference is held for the network namespace while the sock is in use.
 */
static struct sock *audit_get_sk(const struct net *net)
{
	struct audit_net *audit_ns;

	if (!net)
		return NULL;

	/* fetch this namespace's private audit data */
	audit_ns = net_generic(net, audit_net_id);
	return audit_ns->sk;
}
/* React to an unrecoverable audit failure according to audit_failure;
 * AUDIT_FAIL_SILENT intentionally does nothing. */
void audit_panic(const char *message)
{
	if (audit_failure == AUDIT_FAIL_PANIC)
		panic("audit: %s\n", message);
	if (audit_failure == AUDIT_FAIL_PRINTK && printk_ratelimit())
		pr_err("%s\n", message);
}
/* Return 1 if a record may be emitted under the configured rate limit,
 * 0 if it should be suppressed.  The counters are shared between all
 * callers and protected by a local spinlock, so this is callable from
 * any context. */
static inline int audit_rate_check(void)
{
	static unsigned long last_check = 0;
	static int messages = 0;
	static DEFINE_SPINLOCK(lock);
	unsigned long flags;
	unsigned long now;
	int retval = 0;
	/* a limit of zero means no rate limiting at all */
	if (!audit_rate_limit)
		return 1;
	spin_lock_irqsave(&lock, flags);
	if (++messages < audit_rate_limit) {
		retval = 1;
	} else {
		/* over the limit: only allow again once a full HZ window
		 * has elapsed, then restart the message count */
		now = jiffies;
		if (time_after(now, last_check + HZ)) {
			last_check = now;
			messages = 0;
			retval = 1;
		}
	}
	spin_unlock_irqrestore(&lock, flags);
	return retval;
}
/**
 * audit_log_lost - conditionally log lost audit message event
 * @message: the message stating reason for lost audit message
 *
 * Emit at least 1 message per second, even if audit_rate_check is
 * throttling.
 * Always increment the lost messages counter.
 */
void audit_log_lost(const char *message)
{
	static unsigned long last_msg = 0;
	static DEFINE_SPINLOCK(lock);
	unsigned long flags;
	unsigned long now;
	int print;
	atomic_inc(&audit_lost);
	/* always print when configured to panic (the panic below needs the
	 * context) or when no rate limit is set */
	print = (audit_failure == AUDIT_FAIL_PANIC || !audit_rate_limit);
	if (!print) {
		/* otherwise print at most once per HZ window */
		spin_lock_irqsave(&lock, flags);
		now = jiffies;
		if (time_after(now, last_msg + HZ)) {
			print = 1;
			last_msg = now;
		}
		spin_unlock_irqrestore(&lock, flags);
	}
	if (print) {
		if (printk_ratelimit())
			pr_warn("audit_lost=%u audit_rate_limit=%u audit_backlog_limit=%u\n",
				atomic_read(&audit_lost),
				audit_rate_limit,
				audit_backlog_limit);
		/* may panic depending on audit_failure */
		audit_panic(message);
	}
}
/* Emit an AUDIT_CONFIG_CHANGE record describing a "set" operation on a
 * config value.  Returns the result of logging the task's security
 * context; a non-zero return means the caller should deny the change. */
static int audit_log_config_change(char *function_name, u32 new, u32 old,
				   int allow_changes)
{
	struct audit_buffer *ab;
	int rc = 0;
	ab = audit_log_start(audit_context(), GFP_KERNEL, AUDIT_CONFIG_CHANGE);
	if (unlikely(!ab))
		return rc;
	audit_log_format(ab, "op=set %s=%u old=%u ", function_name, new, old);
	audit_log_session_info(ab);
	rc = audit_log_task_context(ab);
	if (rc)
		allow_changes = 0; /* Something weird, deny request */
	audit_log_format(ab, " res=%d", allow_changes);
	audit_log_end(ab);
	return rc;
}
/* Common helper for updating one audit config value: refuses the change
 * when the configuration is locked, logs a CONFIG_CHANGE record when
 * auditing is on, and applies the new value only if permitted. */
static int audit_do_config_change(char *function_name, u32 *to_change, u32 new)
{
	u32 old = *to_change;
	int allow_changes = (audit_enabled != AUDIT_LOCKED);
	int rc = 0;

	if (audit_enabled != AUDIT_OFF) {
		rc = audit_log_config_change(function_name, new, old,
					     allow_changes);
		if (rc)
			allow_changes = 0;
	}

	if (allow_changes == 1)
		*to_change = new;
	else if (rc == 0)
		rc = -EPERM;	/* denied, and logging itself succeeded */

	return rc;
}
/* thin wrappers routing each tunable through audit_do_config_change() */
static int audit_set_rate_limit(u32 limit)
{
	return audit_do_config_change("audit_rate_limit", &audit_rate_limit, limit);
}
static int audit_set_backlog_limit(u32 limit)
{
	return audit_do_config_change("audit_backlog_limit", &audit_backlog_limit, limit);
}
static int audit_set_backlog_wait_time(u32 timeout)
{
	return audit_do_config_change("audit_backlog_wait_time",
				      &audit_backlog_wait_time, timeout);
}
/* Set the global audit enabled state (off/on/locked); remembers that
 * auditing has ever been on so teardown paths can act accordingly. */
static int audit_set_enabled(u32 state)
{
	int rc;

	if (state > AUDIT_LOCKED)
		return -EINVAL;

	rc = audit_do_config_change("audit_enabled", &audit_enabled, state);
	if (rc == 0 && state)
		audit_ever_enabled = true;

	return rc;
}
/* Select the audit failure policy; only the three known modes are valid. */
static int audit_set_failure(u32 state)
{
	switch (state) {
	case AUDIT_FAIL_SILENT:
	case AUDIT_FAIL_PRINTK:
	case AUDIT_FAIL_PANIC:
		return audit_do_config_change("audit_failure", &audit_failure,
					      state);
	default:
		return -EINVAL;
	}
}
/**
 * auditd_conn_free - RCU helper to release an auditd connection struct
 * @rcu: RCU head
 *
 * Description:
 * Drop any references inside the auditd connection tracking struct and free
 * the memory.  Runs from the RCU callback context after all readers are done.
 */
static void auditd_conn_free(struct rcu_head *rcu)
{
	struct auditd_connection *ac;
	ac = container_of(rcu, struct auditd_connection, rcu);
	/* release the pid and netns references taken in auditd_set() */
	put_pid(ac->pid);
	put_net(ac->net);
	kfree(ac);
}
/**
 * auditd_set - Set/Reset the auditd connection state
 * @pid: auditd PID
 * @portid: auditd netlink portid
 * @net: auditd network namespace pointer
 * @skb: the netlink command from the audit daemon
 * @ack: netlink ack flag, cleared if ack'd here
 *
 * Description:
 * This function will obtain and drop network namespace references as
 * necessary.  Returns zero on success, negative values on failure.
 */
static int auditd_set(struct pid *pid, u32 portid, struct net *net,
		      struct sk_buff *skb, bool *ack)
{
	unsigned long flags;
	struct auditd_connection *ac_old, *ac_new;
	struct nlmsghdr *nlh;
	if (!pid || !net)
		return -EINVAL;
	ac_new = kzalloc(sizeof(*ac_new), GFP_KERNEL);
	if (!ac_new)
		return -ENOMEM;
	/* references are dropped later by auditd_conn_free() */
	ac_new->pid = get_pid(pid);
	ac_new->portid = portid;
	ac_new->net = get_net(net);
	/* send the ack now to avoid a race with the queue backlog */
	if (*ack) {
		nlh = nlmsg_hdr(skb);
		netlink_ack(skb, nlh, 0, NULL);
		*ack = false;
	}
	/* publish the new connection; writers serialize on the spinlock */
	spin_lock_irqsave(&auditd_conn_lock, flags);
	ac_old = rcu_dereference_protected(auditd_conn,
					   lockdep_is_held(&auditd_conn_lock));
	rcu_assign_pointer(auditd_conn, ac_new);
	spin_unlock_irqrestore(&auditd_conn_lock, flags);
	/* free the old connection once all RCU readers have moved on */
	if (ac_old)
		call_rcu(&ac_old->rcu, auditd_conn_free);
	return 0;
}
/**
 * kauditd_printk_skb - Print the audit record to the ring buffer
 * @skb: audit record
 *
 * Whatever the reason, this packet may not make it to the auditd connection
 * so write it via printk so the information isn't completely lost.
 */
static void kauditd_printk_skb(struct sk_buff *skb)
{
	struct nlmsghdr *nlh = nlmsg_hdr(skb);

	/* end-of-event markers carry no payload worth printing */
	if (nlh->nlmsg_type == AUDIT_EOE)
		return;
	if (printk_ratelimit())
		pr_notice("type=%d %s\n", nlh->nlmsg_type,
			  (char *)nlmsg_data(nlh));
}
/**
 * kauditd_rehold_skb - Handle a audit record send failure in the hold queue
 * @skb: audit record
 * @error: error code (unused)
 *
 * Description:
 * This should only be used by the kauditd_thread when it fails to flush the
 * hold queue.
 */
static void kauditd_rehold_skb(struct sk_buff *skb, __always_unused int error)
{
	/* put the record back in the queue */
	skb_queue_tail(&audit_hold_queue, skb);
}
/**
 * kauditd_hold_skb - Queue an audit record, waiting for auditd
 * @skb: audit record
 * @error: error code
 *
 * Description:
 * Queue the audit record, waiting for an instance of auditd.  When this
 * function is called we haven't given up yet on sending the record, but things
 * are not looking good.  The first thing we want to do is try to write the
 * record via printk and then see if we want to try and hold on to the record
 * and queue it, if we have room.  If we want to hold on to the record, but we
 * don't have room, record a record lost message.
 */
static void kauditd_hold_skb(struct sk_buff *skb, int error)
{
	/* at this point it is uncertain if we will ever send this to auditd
	 * so try to send the message via printk before we go any further */
	kauditd_printk_skb(skb);

	/* if auditing defaults to off we can just silently drop it */
	if (!audit_default) {
		kfree_skb(skb);
		return;
	}

	/* the hold queue is only for when the daemon goes away completely,
	 * not -EAGAIN failures; if we are in a -EAGAIN state requeue the
	 * record on the retry queue unless it's full, in which case drop it
	 */
	if (error == -EAGAIN) {
		if (!audit_backlog_limit ||
		    skb_queue_len(&audit_retry_queue) < audit_backlog_limit) {
			skb_queue_tail(&audit_retry_queue, skb);
			return;
		}
		audit_log_lost("kauditd retry queue overflow");
		kfree_skb(skb);
		return;
	}

	/* if we have room in the hold queue, queue the message */
	if (!audit_backlog_limit ||
	    skb_queue_len(&audit_hold_queue) < audit_backlog_limit) {
		skb_queue_tail(&audit_hold_queue, skb);
		return;
	}

	/* we have no other options - drop the message */
	audit_log_lost("kauditd hold queue overflow");
	kfree_skb(skb);
}
/**
 * kauditd_retry_skb - Queue an audit record, attempt to send again to auditd
 * @skb: audit record
 * @error: error code (unused)
 *
 * Description:
 * Not as serious as kauditd_hold_skb() as we still have a connected auditd,
 * but for some reason we are having problems sending it audit records so
 * queue the given record and attempt to resend.
 */
static void kauditd_retry_skb(struct sk_buff *skb, __always_unused int error)
{
	bool have_room = !audit_backlog_limit ||
			 skb_queue_len(&audit_retry_queue) < audit_backlog_limit;

	if (have_room) {
		skb_queue_tail(&audit_retry_queue, skb);
		return;
	}

	/* we have to drop the record, send it via printk as a last effort */
	kauditd_printk_skb(skb);
	audit_log_lost("kauditd retry queue overflow");
	kfree_skb(skb);
}
/**
 * auditd_reset - Disconnect the auditd connection
 * @ac: auditd connection state
 *
 * Description:
 * Break the auditd/kauditd connection and move all the queued records into the
 * hold queue in case auditd reconnects.  It is important to note that the @ac
 * pointer should never be dereferenced inside this function as it may be NULL
 * or invalid, you can only compare the memory address!  If @ac is NULL then
 * the connection will always be reset.
 */
static void auditd_reset(const struct auditd_connection *ac)
{
	unsigned long flags;
	struct sk_buff *skb;
	struct auditd_connection *ac_old;
	/* if it isn't already broken, break the connection */
	spin_lock_irqsave(&auditd_conn_lock, flags);
	ac_old = rcu_dereference_protected(auditd_conn,
					   lockdep_is_held(&auditd_conn_lock));
	if (ac && ac != ac_old) {
		/* someone already registered a new auditd connection */
		spin_unlock_irqrestore(&auditd_conn_lock, flags);
		return;
	}
	rcu_assign_pointer(auditd_conn, NULL);
	spin_unlock_irqrestore(&auditd_conn_lock, flags);
	/* the old state is freed only after all RCU readers are done */
	if (ac_old)
		call_rcu(&ac_old->rcu, auditd_conn_free);
	/* flush the retry queue to the hold queue, but don't touch the main
	 * queue since we need to process that normally for multicast */
	while ((skb = skb_dequeue(&audit_retry_queue)))
		kauditd_hold_skb(skb, -ECONNREFUSED);
}
/**
 * auditd_send_unicast_skb - Send a record via unicast to auditd
 * @skb: audit record
 *
 * Description:
 * Send a skb to the audit daemon, returns positive/zero values on success and
 * negative values on failure; in all cases the skb will be consumed by this
 * function.  If the send results in -ECONNREFUSED the connection with auditd
 * will be reset.  This function may sleep so callers should not hold any locks
 * where this would cause a problem.
 */
static int auditd_send_unicast_skb(struct sk_buff *skb)
{
	int rc;
	u32 portid;
	struct net *net;
	struct sock *sk;
	struct auditd_connection *ac;
	/* NOTE: we can't call netlink_unicast while in the RCU section so
	 *       take a reference to the network namespace and grab local
	 *       copies of the namespace, the sock, and the portid; the
	 *       namespace and sock aren't going to go away while we hold a
	 *       reference and if the portid does become invalid after the RCU
	 *       section netlink_unicast() should safely return an error */
	rcu_read_lock();
	ac = rcu_dereference(auditd_conn);
	if (!ac) {
		/* no daemon registered - consume the skb and report refusal */
		rcu_read_unlock();
		kfree_skb(skb);
		rc = -ECONNREFUSED;
		goto err;
	}
	net = get_net(ac->net);
	sk = audit_get_sk(net);
	portid = ac->portid;
	rcu_read_unlock();
	rc = netlink_unicast(sk, skb, portid, 0);
	put_net(net);
	if (rc < 0)
		goto err;
	return rc;
err:
	/* only reset when a daemon was registered and refused the send */
	if (ac && rc == -ECONNREFUSED)
		auditd_reset(ac);
	return rc;
}
/**
 * kauditd_send_queue - Helper for kauditd_thread to flush skb queues
 * @sk: the sending sock
 * @portid: the netlink destination
 * @queue: the skb queue to process
 * @retry_limit: limit on number of netlink unicast failures
 * @skb_hook: per-skb hook for additional processing
 * @err_hook: hook called if the skb fails the netlink unicast send
 *
 * Description:
 * Run through the given queue and attempt to send the audit records to auditd,
 * returns zero on success, negative values on failure. It is up to the caller
 * to ensure that the @sk is valid for the duration of this function.
 *
 */
static int kauditd_send_queue(struct sock *sk, u32 portid,
			      struct sk_buff_head *queue,
			      unsigned int retry_limit,
			      void (*skb_hook)(struct sk_buff *skb),
			      void (*err_hook)(struct sk_buff *skb, int error))
{
	int rc = 0;
	struct sk_buff *skb = NULL;
	struct sk_buff *skb_tail;
	unsigned int failed = 0;
	/* NOTE: kauditd_thread takes care of all our locking, we just use
	 *       the netlink info passed to us (e.g. sk and portid) */
	/* snapshot the current tail so records appended to @queue while we
	 * run (e.g. requeued by an err_hook) are not processed this pass */
	skb_tail = skb_peek_tail(queue);
	while ((skb != skb_tail) && (skb = skb_dequeue(queue))) {
		/* call the skb_hook for each skb we touch */
		if (skb_hook)
			(*skb_hook)(skb);
		/* can we send to anyone via unicast? */
		if (!sk) {
			if (err_hook)
				(*err_hook)(skb, -ECONNREFUSED);
			continue;
		}
retry:
		/* grab an extra skb reference in case of error */
		skb_get(skb);
		rc = netlink_unicast(sk, skb, portid, 0);
		if (rc < 0) {
			/* send failed - try a few times unless fatal error */
			if (++failed >= retry_limit ||
			    rc == -ECONNREFUSED || rc == -EPERM) {
				/* give up on unicast for the rest of the pass */
				sk = NULL;
				if (err_hook)
					(*err_hook)(skb, rc);
				if (rc == -EAGAIN)
					rc = 0;
				/* continue to drain the queue */
				continue;
			} else
				goto retry;
		} else {
			/* skb sent - drop the extra reference and continue */
			consume_skb(skb);
			failed = 0;
		}
	}
	return (rc >= 0 ? 0 : rc);
}
/*
 * kauditd_send_multicast_skb - Send a record to any multicast listeners
 * @skb: audit record
 *
 * Description:
 * Write a multicast message to anyone listening in the initial network
 * namespace. This function doesn't consume an skb as might be expected since
 * it has to copy it anyways.
 */
static void kauditd_send_multicast_skb(struct sk_buff *skb)
{
	struct sk_buff *copy;
	struct sock *sock = audit_get_sk(&init_net);
	struct nlmsghdr *nlh;
	/* NOTE: we are not taking an additional reference for init_net since
	 *       we don't have to worry about it going away */
	/* skip the copy entirely when nobody is subscribed to the group */
	if (!netlink_has_listeners(sock, AUDIT_NLGRP_READLOG))
		return;
	/*
	 * The seemingly wasteful skb_copy() rather than bumping the refcount
	 * using skb_get() is necessary because non-standard mods are made to
	 * the skb by the original kaudit unicast socket send routine. The
	 * existing auditd daemon assumes this breakage. Fixing this would
	 * require co-ordinating a change in the established protocol between
	 * the kaudit kernel subsystem and the auditd userspace code. There is
	 * no reason for new multicast clients to continue with this
	 * non-compliance.
	 */
	copy = skb_copy(skb, GFP_KERNEL);
	if (!copy)
		return;
	/* patch the actual payload length into the copy's netlink header */
	nlh = nlmsg_hdr(copy);
	nlh->nlmsg_len = skb->len;
	nlmsg_multicast(sock, copy, 0, AUDIT_NLGRP_READLOG, GFP_KERNEL);
}
/**
 * kauditd_thread - Worker thread to send audit records to userspace
 * @dummy: unused
 */
static int kauditd_thread(void *dummy)
{
	int rc;
	u32 portid = 0;
	struct net *net = NULL;
	struct sock *sk = NULL;
	struct auditd_connection *ac;
/* maximum number of unicast send failures tolerated per queue flush */
#define UNICAST_RETRIES 5
	set_freezable();
	while (!kthread_should_stop()) {
		/* NOTE: see the lock comments in auditd_send_unicast_skb() */
		rcu_read_lock();
		ac = rcu_dereference(auditd_conn);
		if (!ac) {
			/* no auditd connected; still service the main queue
			 * below for the multicast listeners */
			rcu_read_unlock();
			goto main_queue;
		}
		net = get_net(ac->net);
		sk = audit_get_sk(net);
		portid = ac->portid;
		rcu_read_unlock();
		/* attempt to flush the hold queue */
		rc = kauditd_send_queue(sk, portid,
					&audit_hold_queue, UNICAST_RETRIES,
					NULL, kauditd_rehold_skb);
		if (rc < 0) {
			sk = NULL;
			auditd_reset(ac);
			goto main_queue;
		}
		/* attempt to flush the retry queue */
		rc = kauditd_send_queue(sk, portid,
					&audit_retry_queue, UNICAST_RETRIES,
					NULL, kauditd_hold_skb);
		if (rc < 0) {
			sk = NULL;
			auditd_reset(ac);
			goto main_queue;
		}
main_queue:
		/* process the main queue - do the multicast send and attempt
		 * unicast, dump failed record sends to the retry queue; if
		 * sk == NULL due to previous failures we will just do the
		 * multicast send and move the record to the hold queue */
		rc = kauditd_send_queue(sk, portid, &audit_queue, 1,
					kauditd_send_multicast_skb,
					(sk ?
					 kauditd_retry_skb : kauditd_hold_skb));
		if (ac && rc < 0)
			auditd_reset(ac);
		sk = NULL;
		/* drop our netns reference, no auditd sends past this line */
		if (net) {
			put_net(net);
			net = NULL;
		}
		/* we have processed all the queues so wake everyone */
		wake_up(&audit_backlog_wait);
		/* NOTE: we want to wake up if there is anything on the queue,
		 *       regardless of if an auditd is connected, as we need to
		 *       do the multicast send and rotate records from the
		 *       main queue to the retry/hold queues */
		wait_event_freezable(kauditd_wait,
				     (skb_queue_len(&audit_queue) ? 1 : 0));
	}
	return 0;
}
/* Thread entry point: drain a pre-built list of reply skbs to the requesting
 * netlink portid, then release the netns reference and free the list. */
int audit_send_list_thread(void *_dest)
{
	struct audit_netlink_list *dest = _dest;
	struct sk_buff *skb;
	struct sock *sk = audit_get_sk(dest->net);
	/* wait for parent to finish and send an ACK */
	audit_ctl_lock();
	audit_ctl_unlock();
	/* send failures are ignored; the receiver may simply be gone */
	while ((skb = __skb_dequeue(&dest->q)) != NULL)
		netlink_unicast(sk, skb, dest->portid, 0);
	/* release the netns reference held on behalf of @dest */
	put_net(dest->net);
	kfree(dest);
	return 0;
}
/**
 * audit_make_reply - allocate and fill a netlink audit reply skb
 * @seq: netlink sequence number to echo back to the requester
 * @type: audit message type (replaced by NLMSG_DONE when @done is set)
 * @done: nonzero if this is the final message of a multi-part reply
 * @multi: nonzero to set NLM_F_MULTI on the message
 * @payload: payload bytes to copy into the message
 * @size: payload size in bytes
 *
 * Returns a freshly allocated skb on success, NULL on allocation failure.
 */
struct sk_buff *audit_make_reply(int seq, int type, int done,
				 int multi, const void *payload, int size)
{
	struct nlmsghdr *nlh;
	struct sk_buff *skb;

	skb = nlmsg_new(size, GFP_KERNEL);
	if (!skb)
		return NULL;
	/* a "done" message overrides the requested type */
	nlh = nlmsg_put(skb, 0, seq, done ? NLMSG_DONE : type, size,
			multi ? NLM_F_MULTI : 0);
	if (nlh) {
		memcpy(nlmsg_data(nlh), payload, size);
		return skb;
	}
	/* header didn't fit - give the skb back */
	kfree_skb(skb);
	return NULL;
}
/* Release an audit_reply and everything it owns; a NULL @reply is a no-op. */
static void audit_free_reply(struct audit_reply *reply)
{
	if (reply) {
		/* kfree_skb() tolerates a NULL skb */
		kfree_skb(reply->skb);
		if (reply->net)
			put_net(reply->net);
		kfree(reply);
	}
}
/* Thread entry point: deliver a single prepared reply skb to the requester
 * once the parent has released the audit control lock. */
static int audit_send_reply_thread(void *arg)
{
	struct audit_reply *reply = (struct audit_reply *)arg;
	/* wait for the parent to finish with the control lock */
	audit_ctl_lock();
	audit_ctl_unlock();
	/* Ignore failure. It'll only happen if the sender goes away,
	   because our timeout is set to infinite. */
	netlink_unicast(audit_get_sk(reply->net), reply->skb, reply->portid, 0);
	/* the skb was consumed by netlink_unicast(); don't free it twice */
	reply->skb = NULL;
	audit_free_reply(reply);
	return 0;
}
/**
 * audit_send_reply - send an audit reply message via netlink
 * @request_skb: skb of request we are replying to (used to target the reply)
 * @seq: sequence number
 * @type: audit message type
 * @done: done (last) flag
 * @multi: multi-part message flag
 * @payload: payload data
 * @size: payload size
 *
 * Allocates a skb, builds the netlink message, and sends it to the port id.
 * NOTE(review): delivery is handed off to a kthread, presumably so the
 * caller is not blocked on the netlink send - confirm against callers.
 */
static void audit_send_reply(struct sk_buff *request_skb, int seq, int type, int done,
			     int multi, const void *payload, int size)
{
	struct task_struct *tsk;
	struct audit_reply *reply;
	reply = kzalloc(sizeof(*reply), GFP_KERNEL);
	if (!reply)
		return;
	reply->skb = audit_make_reply(seq, type, done, multi, payload, size);
	if (!reply->skb)
		goto err;
	/* the reply holds a netns reference until the thread drops it */
	reply->net = get_net(sock_net(NETLINK_CB(request_skb).sk));
	reply->portid = NETLINK_CB(request_skb).portid;
	tsk = kthread_run(audit_send_reply_thread, reply, "audit_send_reply");
	if (IS_ERR(tsk))
		goto err;
	return;
err:
	audit_free_reply(reply);
}
/*
 * Check for appropriate CAP_AUDIT_ capabilities on incoming audit
 * control messages. Returns 0 when the sender is allowed to issue
 * @msg_type, a negative errno otherwise.
 */
static int audit_netlink_ok(struct sk_buff *skb, u16 msg_type)
{
	int err = 0;
	/* Only support initial user namespace for now. */
	/*
	 * We return ECONNREFUSED because it tricks userspace into thinking
	 * that audit was not configured into the kernel. Lots of users
	 * configure their PAM stack (because that's what the distro does)
	 * to reject login if unable to send messages to audit. If we return
	 * ECONNREFUSED the PAM stack thinks the kernel does not have audit
	 * configured in and will let login proceed. If we return EPERM
	 * userspace will reject all logins. This should be removed when we
	 * support non init namespaces!!
	 */
	if (current_user_ns() != &init_user_ns)
		return -ECONNREFUSED;
	switch (msg_type) {
	/* obsolete operations are always rejected */
	case AUDIT_LIST:
	case AUDIT_ADD:
	case AUDIT_DEL:
		return -EOPNOTSUPP;
	/* configuration-changing operations need CAP_AUDIT_CONTROL */
	case AUDIT_GET:
	case AUDIT_SET:
	case AUDIT_GET_FEATURE:
	case AUDIT_SET_FEATURE:
	case AUDIT_LIST_RULES:
	case AUDIT_ADD_RULE:
	case AUDIT_DEL_RULE:
	case AUDIT_SIGNAL_INFO:
	case AUDIT_TTY_GET:
	case AUDIT_TTY_SET:
	case AUDIT_TRIM:
	case AUDIT_MAKE_EQUIV:
		/* Only support auditd and auditctl in initial pid namespace
		 * for now. */
		if (task_active_pid_ns(current) != &init_pid_ns)
			return -EPERM;
		if (!netlink_capable(skb, CAP_AUDIT_CONTROL))
			err = -EPERM;
		break;
	/* userspace-generated records only need CAP_AUDIT_WRITE */
	case AUDIT_USER:
	case AUDIT_FIRST_USER_MSG ... AUDIT_LAST_USER_MSG:
	case AUDIT_FIRST_USER_MSG2 ... AUDIT_LAST_USER_MSG2:
		if (!netlink_capable(skb, CAP_AUDIT_WRITE))
			err = -EPERM;
		break;
	default:  /* bad msg */
		err = -EINVAL;
	}
	return err;
}
/* Start an audit record for a received netlink message, prefixed with the
 * sender's pid/uid plus session and LSM context; *ab is set to NULL when
 * auditing is disabled (AUDIT_USER_AVC excepted) or allocation fails. */
static void audit_log_common_recv_msg(struct audit_context *context,
				      struct audit_buffer **ab, u16 msg_type)
{
	uid_t uid = from_kuid(&init_user_ns, current_uid());
	pid_t pid = task_tgid_nr(current);
	if (!audit_enabled && msg_type != AUDIT_USER_AVC) {
		*ab = NULL;
		return;
	}
	*ab = audit_log_start(context, GFP_KERNEL, msg_type);
	if (unlikely(!*ab))
		return;
	audit_log_format(*ab, "pid=%d uid=%u ", pid, uid);
	audit_log_session_info(*ab);
	audit_log_task_context(*ab);
}
/* Start a record for a userspace-generated message; no syscall context. */
static inline void audit_log_user_recv_msg(struct audit_buffer **ab,
					   u16 msg_type)
{
	audit_log_common_recv_msg(NULL, ab, msg_type);
}
static int is_audit_feature_set(int i)
{
return af.features & AUDIT_FEATURE_TO_MASK(i);
}
/* Reply to an AUDIT_GET_FEATURE request with the current feature state. */
static int audit_get_feature(struct sk_buff *skb)
{
	/* echo the requester's sequence number back in the reply */
	audit_send_reply(skb, nlmsg_hdr(skb)->nlmsg_seq, AUDIT_GET_FEATURE,
			 0, 0, &af, sizeof(af));
	return 0;
}
/* Emit an AUDIT_FEATURE_CHANGE record describing one feature transition;
 * @res is 1 when the change was applied, 0 when it was refused. */
static void audit_log_feature_change(int which, u32 old_feature, u32 new_feature,
				     u32 old_lock, u32 new_lock, int res)
{
	struct audit_buffer *ab;
	if (audit_enabled == AUDIT_OFF)
		return;
	ab = audit_log_start(audit_context(), GFP_KERNEL, AUDIT_FEATURE_CHANGE);
	if (!ab)
		return;
	audit_log_task_info(ab);
	/* !! normalizes the mask values to 0/1 for the record */
	audit_log_format(ab, " feature=%s old=%u new=%u old_lock=%u new_lock=%u res=%d",
			 audit_feature_names[which], !!old_feature, !!new_feature,
			 !!old_lock, !!new_lock, res);
	audit_log_end(ab);
}
/*
 * Apply a user-supplied audit feature update.
 *
 * Two passes over the feature set: the first validates that no locked
 * feature would change (logging and returning -EPERM if one would), the
 * second applies and logs the changes. Returns 0 on success.
 */
static int audit_set_feature(struct audit_features *uaf)
{
	int i;
	BUILD_BUG_ON(AUDIT_LAST_FEATURE + 1 > ARRAY_SIZE(audit_feature_names));
	/* if there is ever a version 2 we should handle that here */
	/* pass 1: reject the whole request if any locked feature changes */
	for (i = 0; i <= AUDIT_LAST_FEATURE; i++) {
		u32 feature = AUDIT_FEATURE_TO_MASK(i);
		u32 old_feature, new_feature, old_lock, new_lock;
		/* if we are not changing this feature, move along */
		if (!(feature & uaf->mask))
			continue;
		old_feature = af.features & feature;
		new_feature = uaf->features & feature;
		new_lock = (uaf->lock | af.lock) & feature;
		old_lock = af.lock & feature;
		/* are we changing a locked feature? */
		if (old_lock && (new_feature != old_feature)) {
			audit_log_feature_change(i, old_feature, new_feature,
						 old_lock, new_lock, 0);
			return -EPERM;
		}
	}
	/* nothing invalid, do the changes */
	for (i = 0; i <= AUDIT_LAST_FEATURE; i++) {
		u32 feature = AUDIT_FEATURE_TO_MASK(i);
		u32 old_feature, new_feature, old_lock, new_lock;
		/* if we are not changing this feature, move along */
		if (!(feature & uaf->mask))
			continue;
		old_feature = af.features & feature;
		new_feature = uaf->features & feature;
		old_lock = af.lock & feature;
		new_lock = (uaf->lock | af.lock) & feature;
		if (new_feature != old_feature)
			audit_log_feature_change(i, old_feature, new_feature,
						 old_lock, new_lock, 1);
		if (new_feature)
			af.features |= feature;
		else
			af.features &= ~feature;
		/* locks are sticky: once set they are never cleared */
		af.lock |= new_lock;
	}
	return 0;
}
/* Probe the auditd connection by sending it an AUDIT_REPLACE record that
 * carries the (namespace-relative) PID of the would-be replacement. */
static int audit_replace(struct pid *pid)
{
	struct sk_buff *skb;
	pid_t pvnr = pid_vnr(pid);

	skb = audit_make_reply(0, AUDIT_REPLACE, 0, 0, &pvnr, sizeof(pvnr));
	return skb ? auditd_send_unicast_skb(skb) : -ENOMEM;
}
/*
 * audit_receive_msg - dispatch a single audit netlink control message
 * @skb: the netlink message buffer
 * @nlh: the netlink header of the message to process
 * @ack: in/out ACK flag; passed down to auditd_set() on the AUDIT_SET
 *       registration path, which may send the ACK itself
 *
 * Returns zero/positive values on success, negative values on failure.
 * NOTE: the AUDIT_STATUS_LOST/AUDIT_STATUS_BACKLOG_WAIT_TIME_ACTUAL
 * sub-operations return the old (positive) counter value directly.
 */
static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh,
			     bool *ack)
{
	u32 seq;
	void *data;
	int data_len;
	int err;
	struct audit_buffer *ab;
	u16 msg_type = nlh->nlmsg_type;
	struct audit_sig_info *sig_data;
	struct lsm_context lsmctx = { NULL, 0, 0 };

	/* capability and namespace checks come first */
	err = audit_netlink_ok(skb, msg_type);
	if (err)
		return err;
	seq = nlh->nlmsg_seq;
	data = nlmsg_data(nlh);
	data_len = nlmsg_len(nlh);
	switch (msg_type) {
	case AUDIT_GET: {
		struct audit_status s;
		memset(&s, 0, sizeof(s));
		s.enabled = audit_enabled;
		s.failure = audit_failure;
		/* NOTE: use pid_vnr() so the PID is relative to the current
		 *       namespace */
		s.pid = auditd_pid_vnr();
		s.rate_limit = audit_rate_limit;
		s.backlog_limit = audit_backlog_limit;
		s.lost = atomic_read(&audit_lost);
		s.backlog = skb_queue_len(&audit_queue);
		s.feature_bitmap = AUDIT_FEATURE_BITMAP_ALL;
		s.backlog_wait_time = audit_backlog_wait_time;
		s.backlog_wait_time_actual = atomic_read(&audit_backlog_wait_time_actual);
		audit_send_reply(skb, seq, AUDIT_GET, 0, 0, &s, sizeof(s));
		break;
	}
	case AUDIT_SET: {
		struct audit_status s;
		memset(&s, 0, sizeof(s));
		/* guard against past and future API changes */
		memcpy(&s, data, min_t(size_t, sizeof(s), data_len));
		if (s.mask & AUDIT_STATUS_ENABLED) {
			err = audit_set_enabled(s.enabled);
			if (err < 0)
				return err;
		}
		if (s.mask & AUDIT_STATUS_FAILURE) {
			err = audit_set_failure(s.failure);
			if (err < 0)
				return err;
		}
		if (s.mask & AUDIT_STATUS_PID) {
			/* NOTE: we are using the vnr PID functions below
			 *       because the s.pid value is relative to the
			 *       namespace of the caller; at present this
			 *       doesn't matter much since you can really only
			 *       run auditd from the initial pid namespace, but
			 *       something to keep in mind if this changes */
			pid_t new_pid = s.pid;
			pid_t auditd_pid;
			struct pid *req_pid = task_tgid(current);
			/* Sanity check - PID values must match. Setting
			 * pid to 0 is how auditd ends auditing. */
			if (new_pid && (new_pid != pid_vnr(req_pid)))
				return -EINVAL;
			/* test the auditd connection */
			audit_replace(req_pid);
			auditd_pid = auditd_pid_vnr();
			if (auditd_pid) {
				/* replacing a healthy auditd is not allowed */
				if (new_pid) {
					audit_log_config_change("audit_pid",
							new_pid, auditd_pid, 0);
					return -EEXIST;
				}
				/* only current auditd can unregister itself */
				if (pid_vnr(req_pid) != auditd_pid) {
					audit_log_config_change("audit_pid",
							new_pid, auditd_pid, 0);
					return -EACCES;
				}
			}
			if (new_pid) {
				/* register a new auditd connection */
				err = auditd_set(req_pid,
						 NETLINK_CB(skb).portid,
						 sock_net(NETLINK_CB(skb).sk),
						 skb, ack);
				if (audit_enabled != AUDIT_OFF)
					audit_log_config_change("audit_pid",
								new_pid,
								auditd_pid,
								err ? 0 : 1);
				if (err)
					return err;
				/* try to process any backlog */
				wake_up_interruptible(&kauditd_wait);
			} else {
				if (audit_enabled != AUDIT_OFF)
					audit_log_config_change("audit_pid",
								new_pid,
								auditd_pid, 1);
				/* unregister the auditd connection */
				auditd_reset(NULL);
			}
		}
		if (s.mask & AUDIT_STATUS_RATE_LIMIT) {
			err = audit_set_rate_limit(s.rate_limit);
			if (err < 0)
				return err;
		}
		if (s.mask & AUDIT_STATUS_BACKLOG_LIMIT) {
			err = audit_set_backlog_limit(s.backlog_limit);
			if (err < 0)
				return err;
		}
		if (s.mask & AUDIT_STATUS_BACKLOG_WAIT_TIME) {
			if (sizeof(s) > (size_t)nlh->nlmsg_len)
				return -EINVAL;
			if (s.backlog_wait_time > 10*AUDIT_BACKLOG_WAIT_TIME)
				return -EINVAL;
			err = audit_set_backlog_wait_time(s.backlog_wait_time);
			if (err < 0)
				return err;
		}
		/* NOTE: '==' (not '&'): the two counter resets below are only
		 *       valid as standalone operations and return the old
		 *       counter value to the caller */
		if (s.mask == AUDIT_STATUS_LOST) {
			u32 lost = atomic_xchg(&audit_lost, 0);
			audit_log_config_change("lost", 0, lost, 1);
			return lost;
		}
		if (s.mask == AUDIT_STATUS_BACKLOG_WAIT_TIME_ACTUAL) {
			u32 actual = atomic_xchg(&audit_backlog_wait_time_actual, 0);
			audit_log_config_change("backlog_wait_time_actual", 0, actual, 1);
			return actual;
		}
		break;
	}
	case AUDIT_GET_FEATURE:
		err = audit_get_feature(skb);
		if (err)
			return err;
		break;
	case AUDIT_SET_FEATURE:
		if (data_len < sizeof(struct audit_features))
			return -EINVAL;
		err = audit_set_feature(data);
		if (err)
			return err;
		break;
	case AUDIT_USER:
	case AUDIT_FIRST_USER_MSG ... AUDIT_LAST_USER_MSG:
	case AUDIT_FIRST_USER_MSG2 ... AUDIT_LAST_USER_MSG2:
		if (!audit_enabled && msg_type != AUDIT_USER_AVC)
			return 0;
		/* exit early if there isn't at least one character to print */
		if (data_len < 2)
			return -EINVAL;
		err = audit_filter(msg_type, AUDIT_FILTER_USER);
		if (err == 1) { /* match or error */
			char *str = data;
			err = 0;
			if (msg_type == AUDIT_USER_TTY) {
				err = tty_audit_push();
				if (err)
					break;
			}
			audit_log_user_recv_msg(&ab, msg_type);
			if (msg_type != AUDIT_USER_TTY) {
				/* ensure NULL termination */
				str[data_len - 1] = '\0';
				audit_log_format(ab, " msg='%.*s'",
						 AUDIT_MESSAGE_TEXT_MAX,
						 str);
			} else {
				audit_log_format(ab, " data=");
				if (str[data_len - 1] == '\0')
					data_len--;
				audit_log_n_untrustedstring(ab, str, data_len);
			}
			audit_log_end(ab);
		}
		break;
	case AUDIT_ADD_RULE:
	case AUDIT_DEL_RULE:
		if (data_len < sizeof(struct audit_rule_data))
			return -EINVAL;
		if (audit_enabled == AUDIT_LOCKED) {
			/* refuse, but log the refused rule change */
			audit_log_common_recv_msg(audit_context(), &ab,
						  AUDIT_CONFIG_CHANGE);
			audit_log_format(ab, " op=%s audit_enabled=%d res=0",
					 msg_type == AUDIT_ADD_RULE ?
					 "add_rule" : "remove_rule",
					 audit_enabled);
			audit_log_end(ab);
			return -EPERM;
		}
		err = audit_rule_change(msg_type, seq, data, data_len);
		break;
	case AUDIT_LIST_RULES:
		err = audit_list_rules_send(skb, seq);
		break;
	case AUDIT_TRIM:
		audit_trim_trees();
		audit_log_common_recv_msg(audit_context(), &ab,
					  AUDIT_CONFIG_CHANGE);
		audit_log_format(ab, " op=trim res=1");
		audit_log_end(ab);
		break;
	case AUDIT_MAKE_EQUIV: {
		void *bufp = data;
		u32 sizes[2];
		size_t msglen = data_len;
		char *old, *new;
		err = -EINVAL;
		/* payload layout: two u32 lengths followed by two strings */
		if (msglen < 2 * sizeof(u32))
			break;
		memcpy(sizes, bufp, 2 * sizeof(u32));
		bufp += 2 * sizeof(u32);
		msglen -= 2 * sizeof(u32);
		old = audit_unpack_string(&bufp, &msglen, sizes[0]);
		if (IS_ERR(old)) {
			err = PTR_ERR(old);
			break;
		}
		new = audit_unpack_string(&bufp, &msglen, sizes[1]);
		if (IS_ERR(new)) {
			err = PTR_ERR(new);
			kfree(old);
			break;
		}
		/* OK, here comes... */
		err = audit_tag_tree(old, new);
		audit_log_common_recv_msg(audit_context(), &ab,
					  AUDIT_CONFIG_CHANGE);
		audit_log_format(ab, " op=make_equiv old=");
		audit_log_untrustedstring(ab, old);
		audit_log_format(ab, " new=");
		audit_log_untrustedstring(ab, new);
		audit_log_format(ab, " res=%d", !err);
		audit_log_end(ab);
		kfree(old);
		kfree(new);
		break;
	}
	case AUDIT_SIGNAL_INFO:
		if (lsmprop_is_set(&audit_sig_lsm)) {
			err = security_lsmprop_to_secctx(&audit_sig_lsm,
							 &lsmctx, LSM_ID_UNDEF);
			if (err < 0)
				return err;
		}
		sig_data = kmalloc(struct_size(sig_data, ctx, lsmctx.len),
				   GFP_KERNEL);
		if (!sig_data) {
			if (lsmprop_is_set(&audit_sig_lsm))
				security_release_secctx(&lsmctx);
			return -ENOMEM;
		}
		sig_data->uid = from_kuid(&init_user_ns, audit_sig_uid);
		sig_data->pid = audit_sig_pid;
		if (lsmprop_is_set(&audit_sig_lsm)) {
			memcpy(sig_data->ctx, lsmctx.context, lsmctx.len);
			security_release_secctx(&lsmctx);
		}
		audit_send_reply(skb, seq, AUDIT_SIGNAL_INFO, 0, 0,
				 sig_data, struct_size(sig_data, ctx,
						       lsmctx.len));
		kfree(sig_data);
		break;
	case AUDIT_TTY_GET: {
		struct audit_tty_status s;
		unsigned int t;
		t = READ_ONCE(current->signal->audit_tty);
		s.enabled = t & AUDIT_TTY_ENABLE;
		s.log_passwd = !!(t & AUDIT_TTY_LOG_PASSWD);
		audit_send_reply(skb, seq, AUDIT_TTY_GET, 0, 0, &s, sizeof(s));
		break;
	}
	case AUDIT_TTY_SET: {
		struct audit_tty_status s, old;
		struct audit_buffer *ab;
		unsigned int t;
		memset(&s, 0, sizeof(s));
		/* guard against past and future API changes */
		memcpy(&s, data, min_t(size_t, sizeof(s), data_len));
		/* check if new data is valid */
		if ((s.enabled != 0 && s.enabled != 1) ||
		    (s.log_passwd != 0 && s.log_passwd != 1))
			err = -EINVAL;
		if (err)
			t = READ_ONCE(current->signal->audit_tty);
		else {
			t = s.enabled | (-s.log_passwd & AUDIT_TTY_LOG_PASSWD);
			/* FIX: the source had the mis-encoded token
			 * "xchg(¤t->..." here; xchg() must take the
			 * address of current's audit_tty field */
			t = xchg(&current->signal->audit_tty, t);
		}
		old.enabled = t & AUDIT_TTY_ENABLE;
		old.log_passwd = !!(t & AUDIT_TTY_LOG_PASSWD);
		audit_log_common_recv_msg(audit_context(), &ab,
					  AUDIT_CONFIG_CHANGE);
		audit_log_format(ab, " op=tty_set old-enabled=%d new-enabled=%d"
				 " old-log_passwd=%d new-log_passwd=%d res=%d",
				 old.enabled, s.enabled, old.log_passwd,
				 s.log_passwd, !err);
		audit_log_end(ab);
		break;
	}
	default:
		err = -EINVAL;
		break;
	}
	return err < 0 ? err : 0;
}
/**
 * audit_receive - receive messages from a netlink control socket
 * @skb: the message buffer
 *
 * Parse the provided skb and deal with any messages that may be present,
 * malformed skbs are discarded.
 */
static void audit_receive(struct sk_buff *skb)
{
	struct nlmsghdr *nlh;
	bool ack;
	/*
	 * len MUST be signed for nlmsg_next to be able to dec it below 0
	 * if the nlmsg_len was not aligned
	 */
	int len;
	int err;
	nlh = nlmsg_hdr(skb);
	len = skb->len;
	/* all control message handling runs under the audit control lock */
	audit_ctl_lock();
	while (nlmsg_ok(nlh, len)) {
		ack = nlh->nlmsg_flags & NLM_F_ACK;
		err = audit_receive_msg(skb, nlh, &ack);
		/* send an ack if the user asked for one and audit_receive_msg
		 * didn't already do it, or if there was an error. */
		if (ack || err)
			netlink_ack(skb, nlh, err, NULL);
		nlh = nlmsg_next(nlh, &len);
	}
	audit_ctl_unlock();
	/* can't block with the ctrl lock, so penalize the sender now */
	if (audit_backlog_limit &&
	    (skb_queue_len(&audit_queue) > audit_backlog_limit)) {
		DECLARE_WAITQUEUE(wait, current);
		/* wake kauditd to try and flush the queue */
		wake_up_interruptible(&kauditd_wait);
		add_wait_queue_exclusive(&audit_backlog_wait, &wait);
		set_current_state(TASK_UNINTERRUPTIBLE);
		schedule_timeout(audit_backlog_wait_time);
		remove_wait_queue(&audit_backlog_wait, &wait);
	}
}
/* Log information about who is connecting to the audit multicast socket */
static void audit_log_multicast(int group, const char *op, int err)
{
	const struct cred *cred;
	struct tty_struct *tty;
	char comm[sizeof(current->comm)];
	struct audit_buffer *ab;
	/* nothing to record when auditing is disabled */
	if (!audit_enabled)
		return;
	ab = audit_log_start(audit_context(), GFP_KERNEL, AUDIT_EVENT_LISTENER);
	if (!ab)
		return;
	cred = current_cred();
	tty = audit_get_tty();
	audit_log_format(ab, "pid=%u uid=%u auid=%u tty=%s ses=%u",
			 task_tgid_nr(current),
			 from_kuid(&init_user_ns, cred->uid),
			 from_kuid(&init_user_ns, audit_get_loginuid(current)),
			 tty ? tty_name(tty) : "(none)",
			 audit_get_sessionid(current));
	audit_put_tty(tty);
	audit_log_task_context(ab); /* subj= */
	audit_log_format(ab, " comm=");
	audit_log_untrustedstring(ab, get_task_comm(comm, current));
	audit_log_d_path_exe(ab, current->mm); /* exe= */
	audit_log_format(ab, " nl-mcgrp=%d op=%s res=%d", group, op, !err);
	audit_log_end(ab);
}
/* Run custom bind function on netlink socket group connect or bind requests;
 * the connect attempt is logged whether it is allowed or refused. */
static int audit_multicast_bind(struct net *net, int group)
{
	int err = capable(CAP_AUDIT_READ) ? 0 : -EPERM;

	audit_log_multicast(group, "connect", err);
	return err;
}
/* Log multicast group disconnects; unbinding can never fail, hence err=0. */
static void audit_multicast_unbind(struct net *net, int group)
{
	audit_log_multicast(group, "disconnect", 0);
}
/* Per-netns setup: create the NETLINK_AUDIT kernel socket for @net. */
static int __net_init audit_net_init(struct net *net)
{
	struct netlink_kernel_cfg cfg = {
		.input	= audit_receive,
		.bind	= audit_multicast_bind,
		.unbind	= audit_multicast_unbind,
		.flags	= NL_CFG_F_NONROOT_RECV,
		.groups	= AUDIT_NLGRP_MAX,
	};
	struct audit_net *aunet = net_generic(net, audit_net_id);
	aunet->sk = netlink_kernel_create(net, NETLINK_AUDIT, &cfg);
	if (aunet->sk == NULL) {
		audit_panic("cannot initialize netlink socket in namespace");
		return -ENOMEM;
	}
	/* limit the timeout in case auditd is blocked/stopped */
	aunet->sk->sk_sndtimeo = HZ / 10;
	return 0;
}
/* Per-netns teardown: release the namespace's audit netlink socket. */
static void __net_exit audit_net_exit(struct net *net)
{
	struct audit_net *aunet = net_generic(net, audit_net_id);
	/* NOTE: you would think that we would want to check the auditd
	 * connection and potentially reset it here if it lives in this
	 * namespace, but since the auditd connection tracking struct holds a
	 * reference to this namespace (see auditd_set()) we are only ever
	 * going to get here after that connection has been released */
	netlink_kernel_release(aunet->sk);
}
/* per-network-namespace audit socket setup/teardown hooks */
static struct pernet_operations audit_net_ops __net_initdata = {
	.init = audit_net_init,
	.exit = audit_net_exit,
	.id = &audit_net_id,
	.size = sizeof(struct audit_net),
};
/* Initialize audit support at boot time. */
static int __init audit_init(void)
{
	int i;
	if (audit_initialized == AUDIT_DISABLED)
		return 0;
	/* slab cache backing audit_log_start() buffer allocations */
	audit_buffer_cache = KMEM_CACHE(audit_buffer, SLAB_PANIC);
	skb_queue_head_init(&audit_queue);
	skb_queue_head_init(&audit_retry_queue);
	skb_queue_head_init(&audit_hold_queue);
	for (i = 0; i < AUDIT_INODE_BUCKETS; i++)
		INIT_LIST_HEAD(&audit_inode_hash[i]);
	mutex_init(&audit_cmd_mutex.lock);
	audit_cmd_mutex.owner = NULL;
	pr_info("initializing netlink subsys (%s)\n",
		str_enabled_disabled(audit_default));
	register_pernet_subsys(&audit_net_ops);
	audit_initialized = AUDIT_INITIALIZED;
	/* failure to start the kauditd worker thread is fatal */
	kauditd_task = kthread_run(kauditd_thread, NULL, "kauditd");
	if (IS_ERR(kauditd_task)) {
		int err = PTR_ERR(kauditd_task);
		panic("audit: failed to start the kauditd thread (%d)\n", err);
	}
	audit_log(NULL, GFP_KERNEL, AUDIT_KERNEL,
		"state=initialized audit_enabled=%u res=1",
		 audit_enabled);
	return 0;
}
postcore_initcall(audit_init);
/*
 * Process kernel command-line parameter at boot time.
 * audit={0|off} or audit={1|on}.
 * Always returns 1 to mark the parameter as handled.
 */
static int __init audit_enable(char *str)
{
	if (!strcasecmp(str, "off") || !strcmp(str, "0"))
		audit_default = AUDIT_OFF;
	else if (!strcasecmp(str, "on") || !strcmp(str, "1"))
		audit_default = AUDIT_ON;
	else {
		/* unrecognized values fall back to enabling audit */
		pr_err("audit: invalid 'audit' parameter value (%s)\n", str);
		audit_default = AUDIT_ON;
	}
	/* audit=off at boot disables the subsystem until reboot */
	if (audit_default == AUDIT_OFF)
		audit_initialized = AUDIT_DISABLED;
	if (audit_set_enabled(audit_default))
		pr_err("audit: error setting audit state (%d)\n",
		       audit_default);
	pr_info("%s\n", audit_default ?
		"enabled (after initialization)" : "disabled (until reboot)");
	return 1;
}
__setup("audit=", audit_enable);
/* Process kernel command-line parameter at boot time.
 * audit_backlog_limit=<n> */
static int __init audit_backlog_limit_set(char *str)
{
	u32 audit_backlog_limit_arg;
	/* pr_info/pr_cont pair: the value (or error) completes this line */
	pr_info("audit_backlog_limit: ");
	if (kstrtouint(str, 0, &audit_backlog_limit_arg)) {
		pr_cont("using default of %u, unable to parse %s\n",
			audit_backlog_limit, str);
		return 1;
	}
	audit_backlog_limit = audit_backlog_limit_arg;
	pr_cont("%d\n", audit_backlog_limit);
	return 1;
}
__setup("audit_backlog_limit=", audit_backlog_limit_set);
/* Free an audit_buffer and any skbs still queued on it; NULL is a no-op. */
static void audit_buffer_free(struct audit_buffer *ab)
{
	struct sk_buff *skb;

	if (!ab)
		return;
	for (skb = skb_dequeue(&ab->skb_list); skb;
	     skb = skb_dequeue(&ab->skb_list))
		kfree_skb(skb);
	kmem_cache_free(audit_buffer_cache, ab);
}
/* Allocate an audit_buffer with one skb of AUDIT_BUFSIZ tailroom and a
 * netlink header of the given @type; returns NULL on allocation failure. */
static struct audit_buffer *audit_buffer_alloc(struct audit_context *ctx,
					       gfp_t gfp_mask, int type)
{
	struct audit_buffer *ab;
	ab = kmem_cache_alloc(audit_buffer_cache, gfp_mask);
	if (!ab)
		return NULL;
	skb_queue_head_init(&ab->skb_list);
	ab->skb = nlmsg_new(AUDIT_BUFSIZ, gfp_mask);
	if (!ab->skb)
		goto err;
	skb_queue_tail(&ab->skb_list, ab->skb);
	/* reserve the netlink header now; the payload is appended later */
	if (!nlmsg_put(ab->skb, 0, 0, type, 0, 0))
		goto err;
	ab->ctx = ctx;
	ab->gfp_mask = gfp_mask;
	return ab;
err:
	audit_buffer_free(ab);
	return NULL;
}
/**
 * audit_serial - compute a serial number for the audit record
 *
 * Compute a serial number for the audit record.  Audit records are
 * written to user-space as soon as they are generated, so a complete
 * audit record may be written in several pieces.  The timestamp of the
 * record and this serial number are used by the user-space tools to
 * determine which pieces belong to the same audit record.  The
 * (timestamp,serial) tuple is unique for each syscall and is live from
 * syscall entry to syscall exit.
 *
 * NOTE: Another possibility is to store the formatted records off the
 * audit context (for those records that have a context), and emit them
 * all at syscall exit.  However, this could delay the reporting of
 * significant errors until syscall exit (or never, if the system
 * halts).
 */
unsigned int audit_serial(void)
{
	static atomic_t serial = ATOMIC_INIT(0);
	/* the counter wraps at the 32-bit limit; see the tuple note above */
	return atomic_inc_return(&serial);
}
/* Fill @stamp from the syscall context when available, otherwise fall back
 * to a fresh coarse timestamp plus a new serial number. */
static inline void audit_get_stamp(struct audit_context *ctx,
				   struct audit_stamp *stamp)
{
	if (ctx && auditsc_get_stamp(ctx, stamp))
		return;
	ktime_get_coarse_real_ts64(&stamp->ctime);
	stamp->serial = audit_serial();
}
/**
 * audit_log_start - obtain an audit buffer
 * @ctx: audit_context (may be NULL)
 * @gfp_mask: type of allocation
 * @type: audit message type
 *
 * Returns audit_buffer pointer on success or NULL on error.
 *
 * Obtain an audit buffer.  This routine does locking to obtain the
 * audit buffer, but then no locking is required for calls to
 * audit_log_*format.  If the task (ctx) is a task that is currently in a
 * syscall, then the syscall is marked as auditable and an audit record
 * will be written at syscall exit.  If there is no associated task, then
 * task context (ctx) should be NULL.
 */
struct audit_buffer *audit_log_start(struct audit_context *ctx, gfp_t gfp_mask,
				     int type)
{
	struct audit_buffer *ab;
	if (audit_initialized != AUDIT_INITIALIZED)
		return NULL;
	if (unlikely(!audit_filter(type, AUDIT_FILTER_EXCLUDE)))
		return NULL;
	/* NOTE: don't ever fail/sleep on these two conditions:
	 * 1. auditd generated record - since we need auditd to drain the
	 *    queue; also, when we are checking for auditd, compare PIDs using
	 *    task_tgid_vnr() since auditd_pid is set in audit_receive_msg()
	 *    using a PID anchored in the caller's namespace
	 * 2. generator holding the audit_cmd_mutex - we don't want to block
	 *    while holding the mutex, although we do penalize the sender
	 *    later in audit_receive() when it is safe to block
	 */
	if (!(auditd_test_task(current) || audit_ctl_owner_current())) {
		long stime = audit_backlog_wait_time;
		/* keep waiting (within budget) while the queue is too full */
		while (audit_backlog_limit &&
		       (skb_queue_len(&audit_queue) > audit_backlog_limit)) {
			/* wake kauditd to try and flush the queue */
			wake_up_interruptible(&kauditd_wait);
			/* sleep if we are allowed and we haven't exhausted our
			 * backlog wait limit */
			if (gfpflags_allow_blocking(gfp_mask) && (stime > 0)) {
				long rtime = stime;
				DECLARE_WAITQUEUE(wait, current);
				add_wait_queue_exclusive(&audit_backlog_wait,
							 &wait);
				set_current_state(TASK_UNINTERRUPTIBLE);
				stime = schedule_timeout(rtime);
				/* account the jiffies actually spent waiting */
				atomic_add(rtime - stime, &audit_backlog_wait_time_actual);
				remove_wait_queue(&audit_backlog_wait, &wait);
			} else {
				if (audit_rate_check() && printk_ratelimit())
					pr_warn("audit_backlog=%d > audit_backlog_limit=%d\n",
						skb_queue_len(&audit_queue),
						audit_backlog_limit);
				audit_log_lost("backlog limit exceeded");
				return NULL;
			}
		}
	}
	ab = audit_buffer_alloc(ctx, gfp_mask, type);
	if (!ab) {
		audit_log_lost("out of memory in audit_log_start");
		return NULL;
	}
	audit_get_stamp(ab->ctx, &ab->stamp);
	/* cancel dummy context to enable supporting records */
	if (ctx)
		ctx->dummy = 0;
	/* standard record header: audit(sec.msec:serial): */
	audit_log_format(ab, "audit(%llu.%03lu:%u): ",
			 (unsigned long long)ab->stamp.ctime.tv_sec,
			 ab->stamp.ctime.tv_nsec/1000000,
			 ab->stamp.serial);
	return ab;
}
/**
 * audit_expand - expand skb in the audit buffer
 * @ab: audit_buffer
 * @extra: space to add at tail of the skb
 *
 * Returns 0 (no space) on failed expansion, or available space if
 * successful.
 */
static inline int audit_expand(struct audit_buffer *ab, int extra)
{
	struct sk_buff *skb = ab->skb;
	int oldtail = skb_tailroom(skb);
	int ret = pskb_expand_head(skb, 0, extra, ab->gfp_mask);
	int newtail = skb_tailroom(skb);
	if (ret < 0) {
		audit_log_lost("out of memory in audit_expand");
		return 0;
	}
	/* keep the skb's memory accounting in step with the new tailroom */
	skb->truesize += newtail - oldtail;
	return newtail;
}
/*
 * Format an audit message into the audit buffer.  If there isn't enough
 * room in the audit buffer, more room will be allocated and vsnprint
 * will be called a second time.  Currently, we assume that a printk
 * can't format message larger than 1024 bytes, so we don't either.
 */
static __printf(2, 0)
void audit_log_vformat(struct audit_buffer *ab, const char *fmt, va_list args)
{
	int len, avail;
	struct sk_buff *skb;
	va_list args2;
	if (!ab)
		return;
	BUG_ON(!ab->skb);
	skb = ab->skb;
	avail = skb_tailroom(skb);
	if (avail == 0) {
		avail = audit_expand(ab, AUDIT_BUFSIZ);
		if (!avail)
			goto out;
	}
	/* we may need to format twice, so keep a second copy of the args */
	va_copy(args2, args);
	len = vsnprintf(skb_tail_pointer(skb), avail, fmt, args);
	if (len >= avail) {
		/* The printk buffer is 1024 bytes long, so if we get
		 * here and AUDIT_BUFSIZ is at least 1024, then we can
		 * log everything that printk could have logged. */
		avail = audit_expand(ab,
			max_t(unsigned, AUDIT_BUFSIZ, 1+len-avail));
		if (!avail)
			goto out_va_end;
		len = vsnprintf(skb_tail_pointer(skb), avail, fmt, args2);
	}
	/* commit the formatted bytes to the skb */
	if (len > 0)
		skb_put(skb, len);
out_va_end:
	va_end(args2);
out:
	return;
}
/**
 * audit_log_format - format a message into the audit buffer.
 * @ab: audit_buffer
 * @fmt: format string
 * @...: optional parameters matching @fmt string
 *
 * All the work is done in audit_log_vformat.
 */
void audit_log_format(struct audit_buffer *ab, const char *fmt, ...)
{
va_list args;
/* NULL ab (e.g. audit_log_start() failure) is silently ignored */
if (!ab)
return;
va_start(args, fmt);
audit_log_vformat(ab, fmt, args);
va_end(args);
}
/**
 * audit_log_n_hex - convert a buffer to hex and append it to the audit skb
 * @ab: the audit_buffer
 * @buf: buffer to convert to hex
 * @len: length of @buf to be converted
 *
 * No return value; failure to expand is silently ignored.
 *
 * This function will take the passed buf and convert it into a string of
 * ascii hex digits. The new string is placed onto the skb.
 */
void audit_log_n_hex(struct audit_buffer *ab, const unsigned char *buf,
size_t len)
{
int i, avail, new_len;
unsigned char *ptr;
struct sk_buff *skb;
if (!ab)
return;
BUG_ON(!ab->skb);
skb = ab->skb;
avail = skb_tailroom(skb);
/* two hex digits are emitted per input byte */
new_len = len<<1;
/* '>=' (not '>') keeps one spare byte for the trailing NUL below */
if (new_len >= avail) {
/* Round the buffer request up to the next multiple */
new_len = AUDIT_BUFSIZ*(((new_len-avail)/AUDIT_BUFSIZ) + 1);
avail = audit_expand(ab, new_len);
if (!avail)
return;
}
ptr = skb_tail_pointer(skb);
for (i = 0; i < len; i++)
ptr = hex_byte_pack_upper(ptr, buf[i]);
/* NUL-terminate for safety; the NUL is not counted in the skb length */
*ptr = 0;
skb_put(skb, len << 1); /* new string is twice the old string */
}
/*
 * Format a string of no more than slen characters into the audit buffer,
 * enclosed in quote marks.  @string need not be NUL-terminated; exactly
 * @slen bytes are copied verbatim.
 */
void audit_log_n_string(struct audit_buffer *ab, const char *string,
size_t slen)
{
int avail, new_len;
unsigned char *ptr;
struct sk_buff *skb;
if (!ab)
return;
BUG_ON(!ab->skb);
skb = ab->skb;
avail = skb_tailroom(skb);
new_len = slen + 3; /* enclosing quotes + null terminator */
if (new_len > avail) {
avail = audit_expand(ab, new_len);
if (!avail)
return;
}
ptr = skb_tail_pointer(skb);
*ptr++ = '"';
memcpy(ptr, string, slen);
ptr += slen;
*ptr++ = '"';
/* trailing NUL for safety, excluded from the record length below */
*ptr = 0;
skb_put(skb, slen + 2); /* don't include null terminator */
}
/**
 * audit_string_contains_control - does a string need to be logged in hex
 * @string: string to be checked
 * @len: max length of the string to check
 *
 * Returns true if @string contains a double quote or any byte outside the
 * printable ASCII range 0x21-0x7e (space, control characters, DEL, or
 * anything with the high bit set) -- i.e. anything that must be hex
 * encoded in an audit record.
 */
bool audit_string_contains_control(const char *string, size_t len)
{
	const unsigned char *p;

	/* Cast explicitly: the scan must use unsigned char so bytes >= 0x80
	 * compare as > 0x7e, and the explicit cast avoids the implicit
	 * pointer-signedness conversion (-Wpointer-sign) the old code
	 * relied on. */
	for (p = (const unsigned char *)string;
	     p < (const unsigned char *)string + len; p++) {
		if (*p == '"' || *p < 0x21 || *p > 0x7e)
			return true;
	}
	return false;
}
/**
 * audit_log_n_untrustedstring - log a string that may contain random characters
 * @ab: audit_buffer
 * @string: string to be logged
 * @len: length of string (not including trailing null)
 *
 * This code will escape a string that is passed to it if the string
 * contains a control character, unprintable character, double quote mark,
 * or a space. Unescaped strings will start and end with a double quote mark.
 * Strings that are escaped are printed in hex (2 digits per char).
 *
 * The caller specifies the number of characters in the string to log, which may
 * or may not be the entire string.
 */
void audit_log_n_untrustedstring(struct audit_buffer *ab, const char *string,
size_t len)
{
/* hex-encode the whole string if any byte would need escaping */
if (audit_string_contains_control(string, len))
audit_log_n_hex(ab, string, len);
else
audit_log_n_string(ab, string, len);
}
/**
 * audit_log_untrustedstring - log a string that may contain random characters
 * @ab: audit_buffer
 * @string: string to be logged
 *
 * Same as audit_log_n_untrustedstring(), except that strlen is used to
 * determine string length.  @string must therefore be NUL-terminated.
 */
void audit_log_untrustedstring(struct audit_buffer *ab, const char *string)
{
audit_log_n_untrustedstring(ab, string, strlen(string));
}
/* This is a helper-function to print the escaped d_path */
void audit_log_d_path(struct audit_buffer *ab, const char *prefix,
const struct path *path)
{
char *p, *pathname;
if (prefix)
audit_log_format(ab, "%s", prefix);
/* We will allow 11 spaces for ' (deleted)' to be appended */
pathname = kmalloc(PATH_MAX+11, ab->gfp_mask);
if (!pathname) {
audit_log_format(ab, "\"<no_memory>\"");
return;
}
/* d_path() returns a pointer into pathname, not pathname itself */
p = d_path(path, pathname, PATH_MAX+11);
if (IS_ERR(p)) { /* Should never happen since we send PATH_MAX */
/* FIXME: can we save some information here? */
audit_log_format(ab, "\"<too_long>\"");
} else
/* path components are untrusted; escape them as needed */
audit_log_untrustedstring(ab, p);
kfree(pathname);
}
/* Append "auid=<uid> ses=<sessionid>" for the current task. */
void audit_log_session_info(struct audit_buffer *ab)
{
unsigned int sessionid = audit_get_sessionid(current);
uid_t auid = from_kuid(&init_user_ns, audit_get_loginuid(current));
audit_log_format(ab, "auid=%u ses=%u", auid, sessionid);
}
/* Append " key=<key>" to @ab; a NULL key is logged as "(null)". */
void audit_log_key(struct audit_buffer *ab, char *key)
{
audit_log_format(ab, " key=");
if (key)
/* keys are user-supplied and may need escaping */
audit_log_untrustedstring(ab, key);
else
audit_log_format(ab, "(null)");
}
/**
 * audit_buffer_aux_new - Add an aux record buffer to the skb list
 * @ab: audit_buffer
 * @type: message type
 *
 * Aux records are allocated and added to the skb list of
 * the "main" record. The ab->skb is reset to point to the
 * aux record on its creation. When the aux record is complete
 * ab->skb has to be reset to point to the "main" record.
 * This allows the audit_log_ functions to be ignorant of
 * which kind of record it is logging to. It also avoids adding
 * special data for aux records.
 *
 * On success ab->skb will point to the new aux record.
 * Returns 0 on success, -ENOMEM should allocation fail.
 */
static int audit_buffer_aux_new(struct audit_buffer *ab, int type)
{
/* we must currently be on the "main" record (head of the list) */
WARN_ON(ab->skb != skb_peek(&ab->skb_list));
ab->skb = nlmsg_new(AUDIT_BUFSIZ, ab->gfp_mask);
if (!ab->skb)
goto err;
/* portid/seq/flags are 0; payload length is filled in at send time */
if (!nlmsg_put(ab->skb, 0, 0, type, 0, 0))
goto err;
/* link the aux record behind the main record */
skb_queue_tail(&ab->skb_list, ab->skb);
/* every record of the event shares the main record's timestamp/serial */
audit_log_format(ab, "audit(%llu.%03lu:%u): ",
(unsigned long long)ab->stamp.ctime.tv_sec,
ab->stamp.ctime.tv_nsec/1000000,
ab->stamp.serial);
return 0;
err:
/* kfree_skb(NULL) is safe; restore ab->skb to the main record */
kfree_skb(ab->skb);
ab->skb = skb_peek(&ab->skb_list);
return -ENOMEM;
}
/**
 * audit_buffer_aux_end - Switch back to the "main" record from an aux record
 * @ab: audit_buffer
 *
 * Restores the "main" audit record to ab->skb.
 */
static void audit_buffer_aux_end(struct audit_buffer *ab)
{
/* the main record is always at the head of the skb list */
ab->skb = skb_peek(&ab->skb_list);
}
/**
 * audit_log_subj_ctx - Add LSM subject information
 * @ab: audit_buffer
 * @prop: LSM subject properties.
 *
 * Add a subj= field and, if necessary, a AUDIT_MAC_TASK_CONTEXTS record.
 * Returns 0 on success (or when there is nothing to log), a negative
 * error code otherwise; errors also trigger audit_panic().
 */
int audit_log_subj_ctx(struct audit_buffer *ab, struct lsm_prop *prop)
{
struct lsm_context ctx;
char *space = "";
int error;
int i;
/* populate @prop with the current task's subject properties */
security_current_getlsmprop_subj(prop);
if (!lsmprop_is_set(prop))
return 0;
/* fast path: at most one LSM supplies a context, log it inline */
if (audit_subj_secctx_cnt < 2) {
error = security_lsmprop_to_secctx(prop, &ctx, LSM_ID_UNDEF);
if (error < 0) {
/* -EINVAL just means no context to log */
if (error != -EINVAL)
goto error_path;
return 0;
}
audit_log_format(ab, " subj=%s", ctx.context);
security_release_secctx(&ctx);
return 0;
}
/* Multiple LSMs provide contexts. Include an aux record. */
audit_log_format(ab, " subj=?");
error = audit_buffer_aux_new(ab, AUDIT_MAC_TASK_CONTEXTS);
if (error)
goto error_path;
/* one subj_<lsm>= field per registered subject-context LSM */
for (i = 0; i < audit_subj_secctx_cnt; i++) {
error = security_lsmprop_to_secctx(prop, &ctx,
audit_subj_lsms[i]->id);
if (error < 0) {
/*
 * Don't print anything. An LSM like BPF could
 * claim to support contexts, but only do so under
 * certain conditions.
 */
if (error == -EOPNOTSUPP)
continue;
if (error != -EINVAL)
audit_panic("error in audit_log_subj_ctx");
} else {
audit_log_format(ab, "%ssubj_%s=%s", space,
audit_subj_lsms[i]->name, ctx.context);
space = " ";
security_release_secctx(&ctx);
}
}
/* switch ab->skb back to the main record */
audit_buffer_aux_end(ab);
return 0;
error_path:
audit_panic("error in audit_log_subj_ctx");
return error;
}
EXPORT_SYMBOL(audit_log_subj_ctx);
/* Convenience wrapper: log the current task's LSM subject context. */
int audit_log_task_context(struct audit_buffer *ab)
{
struct lsm_prop prop;
security_current_getlsmprop_subj(&prop);
return audit_log_subj_ctx(ab, &prop);
}
EXPORT_SYMBOL(audit_log_task_context);
/*
 * audit_log_obj_ctx - Add LSM object information
 * @ab: audit_buffer
 * @prop: LSM object properties
 *
 * Adds an obj= field and, when more than one LSM supplies object
 * contexts, an AUDIT_MAC_OBJ_CONTEXTS aux record with one
 * obj_<lsm>= field per LSM.  Returns 0 on success or the last
 * error encountered.
 */
int audit_log_obj_ctx(struct audit_buffer *ab, struct lsm_prop *prop)
{
int i;
int rc;
int error = 0;
char *space = "";
struct lsm_context ctx;
/* fast path: at most one LSM supplies a context, log it inline */
if (audit_obj_secctx_cnt < 2) {
error = security_lsmprop_to_secctx(prop, &ctx, LSM_ID_UNDEF);
if (error < 0) {
/* -EINVAL just means no context to log */
if (error != -EINVAL)
goto error_path;
return error;
}
audit_log_format(ab, " obj=%s", ctx.context);
security_release_secctx(&ctx);
return 0;
}
audit_log_format(ab, " obj=?");
error = audit_buffer_aux_new(ab, AUDIT_MAC_OBJ_CONTEXTS);
if (error)
goto error_path;
for (i = 0; i < audit_obj_secctx_cnt; i++) {
rc = security_lsmprop_to_secctx(prop, &ctx,
audit_obj_lsms[i]->id);
if (rc < 0) {
/* log a placeholder so the field list stays aligned */
audit_log_format(ab, "%sobj_%s=?", space,
audit_obj_lsms[i]->name);
if (rc != -EINVAL)
audit_panic("error in audit_log_obj_ctx");
error = rc;
} else {
audit_log_format(ab, "%sobj_%s=%s", space,
audit_obj_lsms[i]->name, ctx.context);
security_release_secctx(&ctx);
}
space = " ";
}
/* switch ab->skb back to the main record */
audit_buffer_aux_end(ab);
return error;
error_path:
audit_panic("error in audit_log_obj_ctx");
return error;
}
/* Append " exe=<path>" for @mm's executable, or " exe=(null)" if unknown. */
void audit_log_d_path_exe(struct audit_buffer *ab,
struct mm_struct *mm)
{
struct file *exe_file;
if (!mm)
goto out_null;
/* takes a reference on the exe file; dropped after logging */
exe_file = get_mm_exe_file(mm);
if (!exe_file)
goto out_null;
audit_log_d_path(ab, " exe=", &exe_file->f_path);
fput(exe_file);
return;
out_null:
audit_log_format(ab, " exe=(null)");
}
/*
 * audit_get_tty - grab a reference to the current task's controlling tty
 *
 * Returns a referenced tty_struct (release with audit_put_tty()) or NULL
 * when the task has no signal struct or no tty.  siglock is held while
 * reading signal->tty so the pointer cannot change under us.
 *
 * Fix: "&current" had been corrupted to the mojibake "¤t" (an HTML
 * entity mangling of "&current"), which does not compile; restore the
 * correct expression in both lock calls.
 */
struct tty_struct *audit_get_tty(void)
{
	struct tty_struct *tty = NULL;
	unsigned long flags;

	spin_lock_irqsave(&current->sighand->siglock, flags);
	if (current->signal)
		tty = tty_kref_get(current->signal->tty);
	spin_unlock_irqrestore(&current->sighand->siglock, flags);
	return tty;
}
/* Drop the tty reference obtained from audit_get_tty(). */
void audit_put_tty(struct tty_struct *tty)
{
tty_kref_put(tty);
}
/*
 * audit_log_task_info - append the current task's identity fields to @ab
 *
 * Logs pid/ppid, the full set of uids/gids, tty, session, comm, exe path
 * and the LSM subject context.  A NULL @ab is a no-op.
 */
void audit_log_task_info(struct audit_buffer *ab)
{
const struct cred *cred;
char comm[sizeof(current->comm)];
struct tty_struct *tty;
if (!ab)
return;
cred = current_cred();
/* takes a tty reference; released after formatting below */
tty = audit_get_tty();
audit_log_format(ab,
" ppid=%d pid=%d auid=%u uid=%u gid=%u"
" euid=%u suid=%u fsuid=%u"
" egid=%u sgid=%u fsgid=%u tty=%s ses=%u",
task_ppid_nr(current),
task_tgid_nr(current),
from_kuid(&init_user_ns, audit_get_loginuid(current)),
from_kuid(&init_user_ns, cred->uid),
from_kgid(&init_user_ns, cred->gid),
from_kuid(&init_user_ns, cred->euid),
from_kuid(&init_user_ns, cred->suid),
from_kuid(&init_user_ns, cred->fsuid),
from_kgid(&init_user_ns, cred->egid),
from_kgid(&init_user_ns, cred->sgid),
from_kgid(&init_user_ns, cred->fsgid),
tty ? tty_name(tty) : "(none)",
audit_get_sessionid(current));
audit_put_tty(tty);
/* comm is user-influenced, so log it as an untrusted string */
audit_log_format(ab, " comm=");
audit_log_untrustedstring(ab, get_task_comm(comm, current));
audit_log_d_path_exe(ab, current->mm);
audit_log_task_context(ab);
}
EXPORT_SYMBOL(audit_log_task_info);
/**
 * audit_log_path_denied - report a path restriction denial
 * @type: audit message type (AUDIT_ANOM_LINK, AUDIT_ANOM_CREAT, etc)
 * @operation: specific operation name
 */
void audit_log_path_denied(int type, const char *operation)
{
struct audit_buffer *ab;
if (!audit_enabled)
return;
/* Generate log with subject, operation, outcome. */
ab = audit_log_start(audit_context(), GFP_KERNEL, type);
/* audit_log_start() may return NULL (rate limit, backlog, OOM) */
if (!ab)
return;
audit_log_format(ab, "op=%s", operation);
audit_log_task_info(ab);
/* res=0: the operation was denied */
audit_log_format(ab, " res=0");
audit_log_end(ab);
}
/*
 * audit_log_nf_skb - append network header fields of @skb to @ab
 * @ab: the audit buffer
 * @skb: the packet being logged
 * @nfproto: netfilter protocol family of @skb
 *
 * Appends saddr/daddr/proto fields and, for TCP/UDP/UDPLITE/SCTP,
 * sport/dport.  NFPROTO_BRIDGE traffic is first resolved to IPv4/IPv6
 * from the ethernet header.  Returns 0 on success, -ENOMEM when a
 * header could not be fetched from the skb, or -EPFNOSUPPORT for an
 * unrecognized protocol (placeholder fields are logged in that case).
 */
int audit_log_nf_skb(struct audit_buffer *ab,
const struct sk_buff *skb, u8 nfproto)
{
/* find the IP protocol in the case of NFPROTO_BRIDGE */
if (nfproto == NFPROTO_BRIDGE) {
switch (eth_hdr(skb)->h_proto) {
case htons(ETH_P_IP):
nfproto = NFPROTO_IPV4;
break;
case htons(ETH_P_IPV6):
nfproto = NFPROTO_IPV6;
break;
default:
goto unknown_proto;
}
}
switch (nfproto) {
case NFPROTO_IPV4: {
struct iphdr iph;
const struct iphdr *ih;
/* copy the header via skb_header_pointer() into the stack buffer */
ih = skb_header_pointer(skb, skb_network_offset(skb),
sizeof(iph), &iph);
if (!ih)
return -ENOMEM;
/* add transport-layer ports where the protocol carries them */
switch (ih->protocol) {
case IPPROTO_TCP: {
struct tcphdr _tcph;
const struct tcphdr *th;
th = skb_header_pointer(skb, skb_transport_offset(skb),
sizeof(_tcph), &_tcph);
if (!th)
return -ENOMEM;
audit_log_format(ab, " saddr=%pI4 daddr=%pI4 proto=%hhu sport=%hu dport=%hu",
&ih->saddr, &ih->daddr, ih->protocol,
ntohs(th->source), ntohs(th->dest));
break;
}
case IPPROTO_UDP:
case IPPROTO_UDPLITE: {
struct udphdr _udph;
const struct udphdr *uh;
uh = skb_header_pointer(skb, skb_transport_offset(skb),
sizeof(_udph), &_udph);
if (!uh)
return -ENOMEM;
audit_log_format(ab, " saddr=%pI4 daddr=%pI4 proto=%hhu sport=%hu dport=%hu",
&ih->saddr, &ih->daddr, ih->protocol,
ntohs(uh->source), ntohs(uh->dest));
break;
}
case IPPROTO_SCTP: {
struct sctphdr _sctph;
const struct sctphdr *sh;
sh = skb_header_pointer(skb, skb_transport_offset(skb),
sizeof(_sctph), &_sctph);
if (!sh)
return -ENOMEM;
audit_log_format(ab, " saddr=%pI4 daddr=%pI4 proto=%hhu sport=%hu dport=%hu",
&ih->saddr, &ih->daddr, ih->protocol,
ntohs(sh->source), ntohs(sh->dest));
break;
}
default:
/* no port information for other transport protocols */
audit_log_format(ab, " saddr=%pI4 daddr=%pI4 proto=%hhu",
&ih->saddr, &ih->daddr, ih->protocol);
}
break;
}
case NFPROTO_IPV6: {
struct ipv6hdr iph;
const struct ipv6hdr *ih;
u8 nexthdr;
__be16 frag_off;
ih = skb_header_pointer(skb, skb_network_offset(skb),
sizeof(iph), &iph);
if (!ih)
return -ENOMEM;
/* walk past IPv6 extension headers to the transport protocol */
nexthdr = ih->nexthdr;
ipv6_skip_exthdr(skb, skb_network_offset(skb) + sizeof(iph),
&nexthdr, &frag_off);
switch (nexthdr) {
case IPPROTO_TCP: {
struct tcphdr _tcph;
const struct tcphdr *th;
th = skb_header_pointer(skb, skb_transport_offset(skb),
sizeof(_tcph), &_tcph);
if (!th)
return -ENOMEM;
audit_log_format(ab, " saddr=%pI6c daddr=%pI6c proto=%hhu sport=%hu dport=%hu",
&ih->saddr, &ih->daddr, nexthdr,
ntohs(th->source), ntohs(th->dest));
break;
}
case IPPROTO_UDP:
case IPPROTO_UDPLITE: {
struct udphdr _udph;
const struct udphdr *uh;
uh = skb_header_pointer(skb, skb_transport_offset(skb),
sizeof(_udph), &_udph);
if (!uh)
return -ENOMEM;
audit_log_format(ab, " saddr=%pI6c daddr=%pI6c proto=%hhu sport=%hu dport=%hu",
&ih->saddr, &ih->daddr, nexthdr,
ntohs(uh->source), ntohs(uh->dest));
break;
}
case IPPROTO_SCTP: {
struct sctphdr _sctph;
const struct sctphdr *sh;
sh = skb_header_pointer(skb, skb_transport_offset(skb),
sizeof(_sctph), &_sctph);
if (!sh)
return -ENOMEM;
audit_log_format(ab, " saddr=%pI6c daddr=%pI6c proto=%hhu sport=%hu dport=%hu",
&ih->saddr, &ih->daddr, nexthdr,
ntohs(sh->source), ntohs(sh->dest));
break;
}
default:
audit_log_format(ab, " saddr=%pI6c daddr=%pI6c proto=%hhu",
&ih->saddr, &ih->daddr, nexthdr);
}
break;
}
default:
goto unknown_proto;
}
return 0;
unknown_proto:
/* keep the field list shape stable even when we can't parse */
audit_log_format(ab, " saddr=? daddr=? proto=?");
return -EPFNOSUPPORT;
}
EXPORT_SYMBOL(audit_log_nf_skb);
/* global counter which is incremented every time something logs in */
static atomic_t session_id = ATOMIC_INIT(0);
/*
 * audit_set_loginuid_perm - may the current task change its loginuid?
 * @loginuid: the new loginuid value being requested
 *
 * Returns 0 when the change is permitted, -EPERM otherwise.
 */
static int audit_set_loginuid_perm(kuid_t loginuid)
{
/* if we are unset, we don't need privs */
if (!audit_loginuid_set(current))
return 0;
/* if AUDIT_FEATURE_LOGINUID_IMMUTABLE means never ever allow a change*/
if (is_audit_feature_set(AUDIT_FEATURE_LOGINUID_IMMUTABLE))
return -EPERM;
/* it is set, you need permission */
if (!capable(CAP_AUDIT_CONTROL))
return -EPERM;
/* reject if this is not an unset and we don't allow that */
if (is_audit_feature_set(AUDIT_FEATURE_ONLY_UNSET_LOGINUID)
&& uid_valid(loginuid))
return -EPERM;
return 0;
}
/*
 * audit_log_set_loginuid - emit an AUDIT_LOGIN record for a loginuid change
 * @koldloginuid: previous loginuid
 * @kloginuid: requested new loginuid
 * @oldsessionid: previous session id
 * @sessionid: new session id
 * @rc: result of the permission check (0 on success)
 *
 * Called for both successful and denied changes; res=!rc records which.
 */
static void audit_log_set_loginuid(kuid_t koldloginuid, kuid_t kloginuid,
unsigned int oldsessionid,
unsigned int sessionid, int rc)
{
struct audit_buffer *ab;
uid_t uid, oldloginuid, loginuid;
struct tty_struct *tty;
if (!audit_enabled)
return;
ab = audit_log_start(audit_context(), GFP_KERNEL, AUDIT_LOGIN);
if (!ab)
return;
/* translate kernel uids to the values user space expects */
uid = from_kuid(&init_user_ns, task_uid(current));
oldloginuid = from_kuid(&init_user_ns, koldloginuid);
loginuid = from_kuid(&init_user_ns, kloginuid);
tty = audit_get_tty();
audit_log_format(ab, "pid=%d uid=%u", task_tgid_nr(current), uid);
audit_log_task_context(ab);
audit_log_format(ab, " old-auid=%u auid=%u tty=%s old-ses=%u ses=%u res=%d",
oldloginuid, loginuid, tty ? tty_name(tty) : "(none)",
oldsessionid, sessionid, !rc);
audit_put_tty(tty);
audit_log_end(ab);
}
/**
 * audit_set_loginuid - set current task's loginuid
 * @loginuid: loginuid value
 *
 * Returns 0 on success, -EPERM when the change is not permitted.
 *
 * Called (set) from fs/proc/base.c::proc_loginuid_write().
 */
int audit_set_loginuid(kuid_t loginuid)
{
unsigned int oldsessionid, sessionid = AUDIT_SID_UNSET;
kuid_t oldloginuid;
int rc;
oldloginuid = audit_get_loginuid(current);
oldsessionid = audit_get_sessionid(current);
rc = audit_set_loginuid_perm(loginuid);
if (rc)
goto out;
/* are we setting or clearing? */
if (uid_valid(loginuid)) {
sessionid = (unsigned int)atomic_inc_return(&session_id);
/* skip the reserved AUDIT_SID_UNSET value if the counter wraps */
if (unlikely(sessionid == AUDIT_SID_UNSET))
sessionid = (unsigned int)atomic_inc_return(&session_id);
}
current->sessionid = sessionid;
current->loginuid = loginuid;
out:
/* always emit an AUDIT_LOGIN record, even for a denied change */
audit_log_set_loginuid(oldloginuid, loginuid, oldsessionid, sessionid, rc);
return rc;
}
/**
 * audit_signal_info - record signal info for shutting down audit subsystem
 * @sig: signal value
 * @t: task being signaled
 *
 * If the audit subsystem is being terminated, record the task (pid)
 * and uid that is doing that.
 */
int audit_signal_info(int sig, struct task_struct *t)
{
kuid_t uid = current_uid(), auid;
/* only record the sender for signals aimed at the audit daemon */
if (auditd_test_task(t) &&
(sig == SIGTERM || sig == SIGHUP ||
sig == SIGUSR1 || sig == SIGUSR2)) {
audit_sig_pid = task_tgid_nr(current);
auid = audit_get_loginuid(current);
/* prefer the loginuid when one has been set */
if (uid_valid(auid))
audit_sig_uid = auid;
else
audit_sig_uid = uid;
security_current_getlsmprop_subj(&audit_sig_lsm);
}
return audit_signal_info_syscall(t);
}
/**
 * __audit_log_end - enqueue one audit record
 * @skb: the buffer to send
 *
 * Consumes @skb: it is either queued for kauditd or freed when the
 * rate limit has been exceeded.
 */
static void __audit_log_end(struct sk_buff *skb)
{
struct nlmsghdr *nlh;
if (audit_rate_check()) {
/* setup the netlink header, see the comments in
 * kauditd_send_multicast_skb() for length quirks */
nlh = nlmsg_hdr(skb);
nlh->nlmsg_len = skb->len - NLMSG_HDRLEN;
/* queue the netlink packet */
skb_queue_tail(&audit_queue, skb);
} else {
audit_log_lost("rate limit exceeded");
kfree_skb(skb);
}
}
/**
 * audit_log_end - end one audit record
 * @ab: the audit_buffer
 *
 * We can not do a netlink send inside an irq context because it blocks (last
 * arg, flags, is not set to MSG_DONTWAIT), so the audit buffer is placed on a
 * queue and a kthread is scheduled to remove them from the queue outside the
 * irq context. May be called in any context.
 */
void audit_log_end(struct audit_buffer *ab)
{
struct sk_buff *skb;
if (!ab)
return;
/* flush every skb of the event (main record plus any aux records) */
while ((skb = skb_dequeue(&ab->skb_list)))
__audit_log_end(skb);
/* poke the kauditd thread */
wake_up_interruptible(&kauditd_wait);
audit_buffer_free(ab);
}
/**
 * audit_log - Log an audit record
 * @ctx: audit context
 * @gfp_mask: type of allocation
 * @type: audit message type
 * @fmt: format string to use
 * @...: variable parameters matching the format string
 *
 * This is a convenience function that calls audit_log_start,
 * audit_log_vformat, and audit_log_end. It may be called
 * in any context.
 */
void audit_log(struct audit_context *ctx, gfp_t gfp_mask, int type,
const char *fmt, ...)
{
struct audit_buffer *ab;
va_list args;
ab = audit_log_start(ctx, gfp_mask, type);
/* a NULL ab means the record was dropped (rate limit, backlog, OOM);
 * audit_log_start() has already accounted the loss */
if (ab) {
va_start(args, fmt);
audit_log_vformat(ab, fmt, args);
va_end(args);
audit_log_end(ab);
}
}
/* export the core record-building API to modules */
EXPORT_SYMBOL(audit_log_start);
EXPORT_SYMBOL(audit_log_end);
EXPORT_SYMBOL(audit_log_format);
EXPORT_SYMBOL(audit_log);
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import print_function
# $example on$
from pyspark.ml.feature import MinMaxScaler
from pyspark.ml.linalg import Vectors
# $example off$
from pyspark.sql import SparkSession
if __name__ == "__main__":
    # Build (or reuse) a SparkSession for this example application.
    spark = SparkSession.builder.appName("MinMaxScalerExample").getOrCreate()

    # $example on$
    dataFrame = spark.createDataFrame([
        (0, Vectors.dense([1.0, 0.1, -1.0]),),
        (1, Vectors.dense([2.0, 1.1, 1.0]),),
        (2, Vectors.dense([3.0, 10.1, 3.0]),)
    ], ["id", "features"])

    # MinMaxScaler rescales every feature column-wise into [min, max].
    scaler = MinMaxScaler(inputCol="features", outputCol="scaledFeatures")

    # Fitting computes the per-feature min/max summary statistics.
    scalerModel = scaler.fit(dataFrame)

    # Apply the fitted model to rescale each feature vector.
    scaledData = scalerModel.transform(dataFrame)
    print("Features scaled to range: [%f, %f]" % (scaler.getMin(), scaler.getMax()))
    scaledData.select("features", "scaledFeatures").show()
    # $example off$

    spark.stop()
# Local development dependencies including docs building, website upload, ASV benchmark
name: pandas-dev
channels:
- conda-forge
dependencies:
- python=3.11
- pip
# build dependencies
- versioneer
- cython>=3.1.0,<4.0.0a0
- meson>=1.2.1,<2
- meson-python>=0.17.1,<1
# test dependencies
- pytest>=8.3.4
- pytest-cov
- pytest-xdist>=3.6.1
- pytest-qt>=4.4.0
- pytest-localserver
- pyqt>=5.15.9
- coverage
# required dependencies
- python-dateutil
- numpy<3
# optional dependencies
- adbc-driver-postgresql>=1.2.0
- adbc-driver-sqlite>=1.2.0
- beautifulsoup4>=4.12.3
- bottleneck>=1.4.2
- fastparquet>=2024.11.0
- fsspec>=2024.10.0
- html5lib>=1.1
- hypothesis>=6.116.0
- gcsfs>=2024.10.0
- jinja2>=3.1.5
- lxml>=5.3.0
- matplotlib>=3.9.3
- numba>=0.60.0
- numexpr>=2.10.2
- openpyxl>=3.1.5
- odfpy>=1.4.1
- psycopg2>=2.9.10
- pyarrow>=13.0.0
- pyiceberg>=0.8.1
- pymysql>=1.1.1
- pyreadstat>=1.2.8
- pytables>=3.10.1
- python-calamine>=0.3.0
- pytz>=2024.2
- pyxlsb>=1.0.10
- s3fs>=2024.10.0
- scipy>=1.14.1
- sqlalchemy>=2.0.36
- tabulate>=0.9.0
- xarray>=2024.10.0
- xlrd>=2.0.1
- xlsxwriter>=3.2.0
- zstandard>=0.23.0
# downstream packages
- dask-core
- seaborn-base
- ipython
# Mocking s3 tests
- moto
# benchmarks
- asv>=0.6.1
## The compiler packages are meta-packages and install the correct compiler (activation) packages on the respective platforms.
- c-compiler
- cxx-compiler
# code checks
- mypy=1.17.1 # pre-commit uses locally installed mypy
- tokenize-rt # scripts/check_for_inconsistent_pandas_namespace.py
- pre-commit>=4.2.0
# documentation
- gitpython # obtain contributors from git for whatsnew
- natsort # DataFrame.sort_values doctest
- pickleshare # Needed for IPython Sphinx directive in the docs GH#60429
- numpydoc
# temporarily installed with pip with a custom patch until released
# - pydata-sphinx-theme=0.16
- sphinx
- sphinx-design
- sphinx-copybutton
# static typing
- scipy-stubs
- types-python-dateutil
- types-PyMySQL
- types-pytz
- types-PyYAML
# documentation (jupyter notebooks)
- nbconvert>=7.11.0
- nbsphinx
- pandoc
- ipywidgets
- nbformat
- notebook>=7.0.6
- ipykernel
# web
# - jinja2 # already listed in optional dependencies, but documented here for reference
- markdown
- feedparser
- pyyaml
- requests
- pygments # Code highlighting
# web interactive REPL
# see the following links for more context:
# 1. https://jupyterlite-pyodide-kernel.readthedocs.io/en/stable/#compatibility
# 2. https://pyodide.org/en/stable/usage/packages-in-pyodide.html
- jupyterlite-core
- jupyterlite-pyodide-kernel
- pip:
- tzdata>=2023.3
- https://github.com/jorisvandenbossche/pydata-sphinx-theme/archive/refs/heads/v0.16.1+dismissable-announcement-banner.zip
- pytest-cython==0.4.0rc1 # doctest | unknown | github | https://github.com/pandas-dev/pandas | environment.yml |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities to create TensorProtos."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import six
from tensorflow.core.framework import tensor_pb2
from tensorflow.core.framework import tensor_shape_pb2
from tensorflow.python.framework import tensor_shape
from tensorflow.python.util import compat
# TODO(opensource): Add support for pyx_library in the open-source build.
# For now, we use the slow versions that fast_tensor_util replaces.
# pylint: disable=g-import-not-at-top
try:
from tensorflow.python.framework import fast_tensor_util
_FAST_TENSOR_UTIL_AVAILABLE = True
except ImportError:
_FAST_TENSOR_UTIL_AVAILABLE = False
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
# pylint: enable=g-import-not-at-top
def ExtractBitsFromFloat16(x):
  """Return the uint16 bit pattern of `x` as an IEEE float16.

  Args:
    x: a number convertible to numpy float16.

  Returns:
    A Python int holding the raw 16-bit representation of `x`.
  """
  # np.asscalar() was removed in NumPy 1.23; ndarray.item() is the
  # documented equivalent and returns a plain Python int.
  return np.asarray(x, dtype=np.float16).view(np.uint16).item()
def SlowAppendFloat16ArrayToTensorProto(tensor_proto, proto_values):
  """Append float16 values to `tensor_proto.half_val` as raw bit patterns."""
  for value in proto_values:
    tensor_proto.half_val.append(ExtractBitsFromFloat16(value))
# Map each numpy scalar type to a function that appends a flat sequence of
# values of that type onto the matching repeated field of a TensorProto.
# The Cython implementations are used when available; otherwise the pure
# Python Slow* fallbacks defined below are installed.
#
# Fix: the numpy aliases np.object/np.bool (removed in NumPy 1.24) are
# replaced by the builtins they aliased, and np.asscalar (removed in
# NumPy 1.23) by its documented equivalent np.asarray(x).item().  Both
# are drop-in compatible with older NumPy as well.
if _FAST_TENSOR_UTIL_AVAILABLE:
  _NP_TO_APPEND_FN = {
      # TODO(sesse): We should have a
      # fast_tensor_util.AppendFloat16ArrayToTensorProto,
      # but it seems np.float16_t doesn't exist?
      np.float16: SlowAppendFloat16ArrayToTensorProto,
      np.float32: fast_tensor_util.AppendFloat32ArrayToTensorProto,
      np.float64: fast_tensor_util.AppendFloat64ArrayToTensorProto,
      np.int32: fast_tensor_util.AppendInt32ArrayToTensorProto,
      np.int64: fast_tensor_util.AppendInt64ArrayToTensorProto,
      np.uint8: fast_tensor_util.AppendUInt8ArrayToTensorProto,
      np.uint16: fast_tensor_util.AppendUInt16ArrayToTensorProto,
      np.int8: fast_tensor_util.AppendInt8ArrayToTensorProto,
      np.int16: fast_tensor_util.AppendInt16ArrayToTensorProto,
      np.complex64: fast_tensor_util.AppendComplex64ArrayToTensorProto,
      np.complex128: fast_tensor_util.AppendComplex128ArrayToTensorProto,
      # np.object/np.bool were plain aliases for the builtins, which
      # behave identically as lookup keys here.
      object: fast_tensor_util.AppendObjectArrayToTensorProto,
      bool: fast_tensor_util.AppendBoolArrayToTensorProto,
      dtypes.qint8.as_numpy_dtype:
          fast_tensor_util.AppendInt8ArrayToTensorProto,
      dtypes.quint8.as_numpy_dtype:
          fast_tensor_util.AppendUInt8ArrayToTensorProto,
      dtypes.qint16.as_numpy_dtype:
          fast_tensor_util.AppendInt8ArrayToTensorProto,
      dtypes.quint16.as_numpy_dtype:
          fast_tensor_util.AppendUInt8ArrayToTensorProto,
      dtypes.qint32.as_numpy_dtype:
          fast_tensor_util.AppendInt32ArrayToTensorProto,
      # NOTE(touts): Intentionally no way to feed a DT_BFLOAT16.
  }
else:

  def SlowAppendFloat32ArrayToTensorProto(tensor_proto, proto_values):
    tensor_proto.float_val.extend([np.asarray(x).item() for x in proto_values])

  def SlowAppendFloat64ArrayToTensorProto(tensor_proto, proto_values):
    tensor_proto.double_val.extend([np.asarray(x).item() for x in proto_values])

  def SlowAppendIntArrayToTensorProto(tensor_proto, proto_values):
    tensor_proto.int_val.extend([np.asarray(x).item() for x in proto_values])

  def SlowAppendQIntArrayToTensorProto(tensor_proto, proto_values):
    # Quantized values arrive as single-field structured scalars; x[0]
    # extracts the underlying integer.
    tensor_proto.int_val.extend([np.asarray(x[0]).item() for x in proto_values])

  def SlowAppendInt64ArrayToTensorProto(tensor_proto, proto_values):
    tensor_proto.int64_val.extend([np.asarray(x).item() for x in proto_values])

  def SlowAppendComplex64ArrayToTensorProto(tensor_proto, proto_values):
    # Complex values are stored as interleaved (real, imag) pairs.
    tensor_proto.scomplex_val.extend([np.asarray(v).item()
                                      for x in proto_values
                                      for v in [x.real, x.imag]])

  def SlowAppendComplex128ArrayToTensorProto(tensor_proto, proto_values):
    tensor_proto.dcomplex_val.extend([np.asarray(v).item()
                                      for x in proto_values
                                      for v in [x.real, x.imag]])

  def SlowAppendObjectArrayToTensorProto(tensor_proto, proto_values):
    tensor_proto.string_val.extend([compat.as_bytes(x) for x in proto_values])

  def SlowAppendBoolArrayToTensorProto(tensor_proto, proto_values):
    tensor_proto.bool_val.extend([np.asarray(x).item() for x in proto_values])

  _NP_TO_APPEND_FN = {
      np.float16: SlowAppendFloat16ArrayToTensorProto,
      np.float32: SlowAppendFloat32ArrayToTensorProto,
      np.float64: SlowAppendFloat64ArrayToTensorProto,
      np.int32: SlowAppendIntArrayToTensorProto,
      np.int64: SlowAppendInt64ArrayToTensorProto,
      np.uint8: SlowAppendIntArrayToTensorProto,
      np.uint16: SlowAppendIntArrayToTensorProto,
      np.int8: SlowAppendIntArrayToTensorProto,
      np.int16: SlowAppendIntArrayToTensorProto,
      np.complex64: SlowAppendComplex64ArrayToTensorProto,
      np.complex128: SlowAppendComplex128ArrayToTensorProto,
      object: SlowAppendObjectArrayToTensorProto,
      bool: SlowAppendBoolArrayToTensorProto,
      dtypes.qint8.as_numpy_dtype: SlowAppendQIntArrayToTensorProto,
      dtypes.quint8.as_numpy_dtype: SlowAppendQIntArrayToTensorProto,
      dtypes.qint16.as_numpy_dtype: SlowAppendQIntArrayToTensorProto,
      dtypes.quint16.as_numpy_dtype: SlowAppendQIntArrayToTensorProto,
      dtypes.qint32.as_numpy_dtype: SlowAppendQIntArrayToTensorProto,
      # NOTE(touts): Intentionally no way to feed a DT_BFLOAT16.
  }
def GetFromNumpyDTypeDict(dtype_dict, dtype):
  """Look up `dtype` in `dtype_dict` by equality instead of by hash.

  numpy dtype objects do not hash consistently with the scalar-type keys
  used in the table (dtype_dict.get(dtype) always returns None), so scan
  the entries and compare each key with `==`.

  Returns:
    The value for the first key equal to `dtype`, or None if no key matches.
  """
  return next((val for key, val in dtype_dict.items() if key == dtype), None)
def GetNumpyAppendFn(dtype):
  """Return the TensorProto append function registered for `dtype`.

  Args:
    dtype: a numpy dtype object describing the values to append.

  Returns:
    A callable taking (tensor_proto, proto_values), or None when no append
    function is registered for `dtype`.
  """
  # numpy dtype for strings are variable length. We can not compare
  # dtype with a single constant (np.string does not exist) to decide
  # dtype is a "string" type. We need to compare the dtype.type to be
  # sure it's a string type.
  # np.bytes_/np.str_ are the canonical spellings; the old aliases
  # np.string_/np.unicode_ (identical objects on NumPy < 2) were removed
  # in NumPy 2.0.
  if dtype.type == np.bytes_ or dtype.type == np.str_:
    if _FAST_TENSOR_UTIL_AVAILABLE:
      return fast_tensor_util.AppendObjectArrayToTensorProto
    else:
      return SlowAppendObjectArrayToTensorProto
  return GetFromNumpyDTypeDict(_NP_TO_APPEND_FN, dtype)
def TensorShapeProtoToList(shape):
  """Convert a TensorShape to a list.

  Args:
    shape: A TensorShapeProto.

  Returns:
    List of integers representing the dimensions of the tensor.
  """
  sizes = []
  for d in shape.dim:
    sizes.append(d.size)
  return sizes
def _GetDenseDimensions(list_of_lists):
  """Returns the inferred dense dimensions of a list of lists.

  A non-sequence yields [], an empty sequence yields [0], and otherwise the
  length of each nesting level is recorded by recursing into the first
  element.
  """
  if not isinstance(list_of_lists, (list, tuple)):
    return []
  dims = [len(list_of_lists)]
  if list_of_lists:
    dims.extend(_GetDenseDimensions(list_of_lists[0]))
  return dims
def _FlattenToStrings(nested_strings):
  """Yield the leaf (non-list) values of an arbitrarily nested list."""
  if not isinstance(nested_strings, list):
    yield nested_strings
    return
  for inner in nested_strings:
    # Recurse into sub-lists, yielding their leaves in order.
    for leaf in _FlattenToStrings(inner):
      yield leaf
# Tensor dtypes whose values may be serialized into the packed
# "tensor_content" bytes field of a TensorProto instead of the typed
# repeated fields.
_TENSOR_CONTENT_TYPES = frozenset([
    dtypes.float32, dtypes.float64, dtypes.int32, dtypes.uint8, dtypes.int16,
    dtypes.int8, dtypes.int64, dtypes.qint8, dtypes.quint8, dtypes.qint16,
    dtypes.quint16, dtypes.qint32,
])


class _Message(object):
  """Wrapper whose repr() is a fixed message.

  Used by the _Filter* helpers below so type-mismatch errors can report a
  descriptive placeholder instead of an object's default repr.
  """

  def __init__(self, message):
    # message: the exact string repr() should return.
    self._message = message

  def __repr__(self):
    return self._message
def _FirstNotNone(l):
  """Return the first non-None element of `l`, or None if all are None.

  Tensor elements are replaced by a placeholder message so error text
  never embeds a raw Tensor repr.
  """
  for item in l:
    if item is None:
      continue
    if isinstance(item, ops.Tensor):
      return _Message("list containing Tensors")
    return item
  return None


def _NotNone(v):
  """Return `v`, substituting a printable marker when it is None."""
  return _Message("None") if v is None else v
def _FilterInt(v):
if isinstance(v, (list, tuple)):
return _FirstNotNone([_FilterInt(x) for x in v])
return None if isinstance(v, compat.integral_types) else _NotNone(v)
def _FilterFloat(v):
  """Return the first non-real-number value found in `v`, or None if all pass."""
  if isinstance(v, (list, tuple)):
    return _FirstNotNone([_FilterFloat(x) for x in v])
  if isinstance(v, compat.real_types):
    return None
  return _NotNone(v)
def _FilterComplex(v):
  """Return the first non-complex value found in `v`, or None if all pass."""
  if isinstance(v, (list, tuple)):
    return _FirstNotNone([_FilterComplex(x) for x in v])
  if isinstance(v, compat.complex_types):
    return None
  return _NotNone(v)
def _FilterStr(v):
  """Return the first non-string value found in `v`, or None if all pass."""
  if isinstance(v, (list, tuple)):
    return _FirstNotNone([_FilterStr(x) for x in v])
  if isinstance(v, compat.bytes_or_text_types):
    return None
  return _NotNone(v)
def _FilterBool(v):
if isinstance(v, (list, tuple)):
return _FirstNotNone([_FilterBool(x) for x in v])
return None if isinstance(v, bool) else _NotNone(v)
def _FilterNotTensor(v):
  """Return str(v) if `v` is (or contains) a Tensor, else None.

  Inverse polarity to the other filters: here a Tensor is the mismatch.
  """
  if isinstance(v, (list, tuple)):
    return _FirstNotNone([_FilterNotTensor(x) for x in v])
  if isinstance(v, ops.Tensor):
    return str(v)
  return None
# Maps a TF dtype to the filter used by _AssertCompatible to detect
# incompatible Python values; dtypes absent from this table fall back
# to _FilterNotTensor.
_TF_TO_IS_OK = {
    dtypes.bool: _FilterBool,
    dtypes.complex128: _FilterComplex,
    dtypes.complex64: _FilterComplex,
    dtypes.float32: _FilterFloat,
    dtypes.float64: _FilterFloat,
    dtypes.int16: _FilterInt,
    dtypes.int32: _FilterInt,
    dtypes.int64: _FilterInt,
    dtypes.int8: _FilterInt,
    dtypes.qint16: _FilterInt,
    dtypes.qint32: _FilterInt,
    dtypes.qint8: _FilterInt,
    dtypes.quint16: _FilterInt,
    dtypes.quint8: _FilterInt,
    dtypes.string: _FilterStr,
    dtypes.uint16: _FilterInt,
    dtypes.uint8: _FilterInt,
}
def _AssertCompatible(values, dtype):
  """Raise TypeError if `values` contains an element incompatible with `dtype`."""
  filter_fn = _TF_TO_IS_OK.get(dtype, _FilterNotTensor)
  bad = filter_fn(values)
  if bad is None:
    return
  if dtype is None:
    raise TypeError("List of Tensors when single Tensor expected")
  raise TypeError("Expected %s, got %s of type '%s' instead." %
                  (dtype.name, repr(bad), type(bad).__name__))
def make_tensor_proto(values, dtype=None, shape=None):
  """Create a TensorProto.
  Args:
    values: Values to put in the TensorProto.
    dtype: Optional tensor_pb2 DataType value.
    shape: List of integers representing the dimensions of tensor.
  Returns:
    A TensorProto. Depending on the type, it may contain data in the
    "tensor_content" attribute, which is not directly useful to Python programs.
    To access the values you should convert the proto back to a numpy ndarray
    with tensor_util.MakeNdarray(proto).
  Raises:
    TypeError: if unsupported types are provided.
    ValueError: if arguments have inappropriate values.
  make_tensor_proto accepts "values" of a python scalar, a python list, a
  numpy ndarray, or a numpy scalar.
  If "values" is a python scalar or a python list, make_tensor_proto
  first convert it to numpy ndarray. If dtype is None, the
  conversion tries its best to infer the right numpy data
  type. Otherwise, the resulting numpy array has a compatible data
  type with the given dtype.
  In either case above, the numpy ndarray (either the caller provided
  or the auto converted) must have the compatible type with dtype.
  make_tensor_proto then converts the numpy array to a tensor proto.
  If "shape" is None, the resulting tensor proto represents the numpy
  array precisely.
  Otherwise, "shape" specifies the tensor's shape and the numpy array
  can not have more elements than what "shape" specifies.
  """
  if dtype:
    dtype = dtypes.as_dtype(dtype)
  is_quantized = (dtype in [dtypes.qint8, dtypes.quint8, dtypes.qint16,
                            dtypes.quint16, dtypes.qint32])
  # We first convert value to a numpy array or scalar.
  if isinstance(values, (np.ndarray, np.generic)):
    if dtype:
      nparray = values.astype(dtype.as_numpy_dtype)
    else:
      nparray = values
  else:
    if values is None:
      raise ValueError("None values not supported.")
    # if dtype is provided, forces numpy array to be the type
    # provided if possible.
    np_dt = dtype.as_numpy_dtype if dtype else None
    # NOTE(review): when shape is None this relies on np.prod(None) not
    # comparing equal to 0 so that the else branch is taken — confirm;
    # an explicit `shape is not None and` guard would be clearer.
    if np.prod(shape) == 0:
      nparray = np.empty(shape, dtype=np_dt)
    else:
      _AssertCompatible(values, dtype)
      nparray = np.array(values, dtype=np_dt)
      # We need to pass in quantized values as tuples, so don't apply the
      # shape check to them.
      if (list(nparray.shape) != _GetDenseDimensions(values) and
          not is_quantized):
        raise ValueError("""Argument must be a dense tensor: %s"""
                         """ - got shape %s, but wanted %s.""" % (
                             values, list(nparray.shape),
                             _GetDenseDimensions(values)))
    # python/numpy default float type is float64. We prefer float32 instead.
    if (nparray.dtype == np.float64) and dtype is None:
      nparray = nparray.astype(np.float32)
    # python/numpy default int type is int64. We prefer int32 instead.
    elif (nparray.dtype == np.int64) and dtype is None:
      downcasted_array = nparray.astype(np.int32)
      # Do not down cast if it leads to precision loss.
      if np.array_equal(downcasted_array, nparray):
        nparray = downcasted_array
  # if dtype is provided, it must be compatible with what numpy
  # conversion says.
  numpy_dtype = dtypes.as_dtype(nparray.dtype)
  if numpy_dtype is None:
    raise TypeError("Unrecognized data type: %s" % nparray.dtype)
  # If dtype was specified and is a quantized type, we convert
  # numpy_dtype back into the quantized version.
  if is_quantized:
    numpy_dtype = dtype
  if dtype is not None and (not hasattr(dtype, "base_dtype") or
                            dtype.base_dtype != numpy_dtype.base_dtype):
    raise TypeError("Incompatible types: %s vs. %s" % (dtype, nparray.dtype))
  # If shape is not given, get the shape from the numpy array.
  if shape is None:
    shape = nparray.shape
    is_same_size = True
    shape_size = nparray.size
  else:
    shape = [int(dim) for dim in shape]
    shape_size = np.prod(shape)
    is_same_size = shape_size == nparray.size
    # Fewer elements than the shape requires is allowed (the values are
    # then serialized element-wise below), but never more.
    if nparray.size > shape_size:
      raise ValueError(
          "Too many elements provided. Needed at most %d, but received %d" %
          (shape_size, nparray.size))
  tensor_proto = tensor_pb2.TensorProto(
      dtype=numpy_dtype.as_datatype_enum,
      tensor_shape=tensor_shape.as_shape(shape).as_proto())
  # Fast path: serialize the raw bytes into tensor_content when the array
  # exactly fills the shape and the dtype supports it.
  if is_same_size and numpy_dtype in _TENSOR_CONTENT_TYPES and shape_size > 1:
    if nparray.size * nparray.itemsize >= (1 << 31):
      raise ValueError(
          "Cannot create a tensor proto whose content is larger than 2GB.")
    tensor_proto.tensor_content = nparray.tostring()
    return tensor_proto
  # If we were not given values as a numpy array, compute the proto_values
  # from the given values directly, to avoid numpy trimming nulls from the
  # strings. Since values could be a list of strings, or a multi-dimensional
  # list of lists that might or might not correspond to the given shape,
  # we flatten it conservatively.
  if numpy_dtype == dtypes.string and not isinstance(values, np.ndarray):
    proto_values = _FlattenToStrings(values)
    tensor_proto.string_val.extend([compat.as_bytes(x) for x in proto_values])
    return tensor_proto
  # TensorFlow expects C order (a.k.a., eigen row major).
  proto_values = nparray.ravel()
  append_fn = GetNumpyAppendFn(proto_values.dtype)
  if append_fn is None:
    raise TypeError("Element type not supported in TensorProto: %s" %
                    numpy_dtype.name)
  append_fn(tensor_proto, proto_values)
  return tensor_proto
def MakeNdarray(tensor):
  """Create a numpy ndarray from a tensor.
  Create a numpy ndarray with the same shape and data as the tensor.
  Args:
    tensor: A TensorProto.
  Returns:
    A numpy array with the tensor contents.
  Raises:
    TypeError: if tensor has unsupported type.
  """
  shape = [d.size for d in tensor.tensor_shape.dim]
  num_elements = np.prod(shape)
  tensor_dtype = dtypes.as_dtype(tensor.dtype)
  dtype = tensor_dtype.as_numpy_dtype
  # Compact encoding: raw bytes, C order (the inverse of the
  # tensor_content fast path in make_tensor_proto).
  if tensor.tensor_content:
    return np.fromstring(tensor.tensor_content, dtype=dtype).reshape(shape)
  elif tensor_dtype == dtypes.float16:
    # the half_val field of the TensorProto stores the binary representation
    # of the fp16: we need to reinterpret this as a proper float16
    if len(tensor.half_val) == 1:
      # A single stored value is broadcast ("splatted") over all elements.
      tmp = np.array(tensor.half_val[0], dtype=np.uint16)
      tmp.dtype = np.float16
      return np.repeat(tmp, num_elements).reshape(shape)
    else:
      tmp = np.fromiter(tensor.half_val, dtype=np.uint16)
      tmp.dtype = np.float16
      return tmp.reshape(shape)
  elif tensor_dtype == dtypes.float32:
    if len(tensor.float_val) == 1:
      # Single value broadcast over all elements.
      return np.repeat(np.array(tensor.float_val[0], dtype=dtype),
                       num_elements).reshape(shape)
    else:
      return np.fromiter(tensor.float_val, dtype=dtype).reshape(shape)
  elif tensor_dtype == dtypes.float64:
    if len(tensor.double_val) == 1:
      return np.repeat(np.array(tensor.double_val[0], dtype=dtype),
                       num_elements).reshape(shape)
    else:
      return np.fromiter(tensor.double_val, dtype=dtype).reshape(shape)
  elif tensor_dtype in [dtypes.int32, dtypes.uint8, dtypes.uint16, dtypes.int16,
                        dtypes.int8, dtypes.qint32, dtypes.quint8, dtypes.qint8,
                        dtypes.qint16, dtypes.quint16, dtypes.bfloat16]:
    # All narrow integer types (and bfloat16) share the int_val field.
    if len(tensor.int_val) == 1:
      return np.repeat(np.array(tensor.int_val[0], dtype=dtype),
                       num_elements).reshape(shape)
    else:
      return np.fromiter(tensor.int_val, dtype=dtype).reshape(shape)
  elif tensor_dtype == dtypes.int64:
    if len(tensor.int64_val) == 1:
      return np.repeat(np.array(tensor.int64_val[0], dtype=dtype),
                       num_elements).reshape(shape)
    else:
      return np.fromiter(tensor.int64_val, dtype=dtype).reshape(shape)
  elif tensor_dtype == dtypes.string:
    if len(tensor.string_val) == 1:
      return np.repeat(np.array(tensor.string_val[0], dtype=dtype),
                       num_elements).reshape(shape)
    else:
      return np.array([x for x in tensor.string_val],
                      dtype=dtype).reshape(shape)
  elif tensor_dtype == dtypes.complex64:
    # scomplex_val interleaves real and imaginary parts; zip(it, it)
    # pairs consecutive entries back into complex numbers.
    it = iter(tensor.scomplex_val)
    if len(tensor.scomplex_val) == 2:
      return np.repeat(np.array(complex(tensor.scomplex_val[0],
                                        tensor.scomplex_val[1]), dtype=dtype),
                       num_elements).reshape(shape)
    else:
      return np.array([complex(x[0], x[1]) for x in zip(it, it)],
                      dtype=dtype).reshape(shape)
  elif tensor_dtype == dtypes.complex128:
    # Same interleaved layout as complex64, in dcomplex_val.
    it = iter(tensor.dcomplex_val)
    if len(tensor.dcomplex_val) == 2:
      return np.repeat(np.array(complex(tensor.dcomplex_val[0],
                                        tensor.dcomplex_val[1]), dtype=dtype),
                       num_elements).reshape(shape)
    else:
      return np.array([complex(x[0], x[1]) for x in zip(it, it)],
                      dtype=dtype).reshape(shape)
  elif tensor_dtype == dtypes.bool:
    if len(tensor.bool_val) == 1:
      return np.repeat(np.array(tensor.bool_val[0], dtype=dtype),
                       num_elements).reshape(shape)
    else:
      return np.fromiter(tensor.bool_val, dtype=dtype).reshape(shape)
  else:
    raise TypeError("Unsupported tensor type: %s" % tensor.dtype)
def ShapeEquals(tensor_proto, shape):
  """Returns True if "tensor_proto" has the given "shape".
  Args:
    tensor_proto: A TensorProto.
    shape: A tensor shape, expressed as a TensorShape, list, or tuple.
  Returns:
    True if "tensor_proto" has the given "shape", otherwise False.
  Raises:
    TypeError: If "tensor_proto" is not a TensorProto, or shape is not a
      TensorShape, list, or tuple.
  """
  if not isinstance(tensor_proto, tensor_pb2.TensorProto):
    raise TypeError("tensor_proto is not a tensor_pb2.TensorProto object")
  if isinstance(shape, tensor_shape_pb2.TensorShapeProto):
    shape = [d.size for d in shape.dim]
  elif not isinstance(shape, (list, tuple)):
    raise TypeError("shape is not a list or tuple")
  tensor_shape_list = [d.size for d in tensor_proto.tensor_shape.dim]
  # Compare ranks explicitly: zip() silently truncates to the shorter
  # sequence, which would wrongly report equality for shapes of different
  # rank that share a prefix (e.g. [2, 3] vs. [2]).
  return (len(tensor_shape_list) == len(shape) and
          all(x == y for x, y in zip(tensor_shape_list, shape)))
def _ConstantValue(tensor):
  """Best-effort static evaluation of `tensor`; returns an ndarray or None."""
  # TODO(touts): Support Variables?
  if not isinstance(tensor, ops.Tensor):
    raise TypeError("tensor is not a Tensor")
  if tensor.op.type == "Const":
    # Constant op: decode the value directly from the attr proto.
    return MakeNdarray(tensor.op.get_attr("value"))
  elif tensor.op.type == "Shape":
    # The shape is statically known iff the input's shape is fully defined.
    input_shape = tensor.op.inputs[0].get_shape()
    if input_shape.is_fully_defined():
      return np.array([dim.value for dim in input_shape.dims],
                      dtype=tensor.dtype.as_numpy_dtype)
    else:
      return None
  elif tensor.op.type == "Size":
    input_shape = tensor.op.inputs[0].get_shape()
    if input_shape.is_fully_defined():
      return np.prod([dim.value for dim in input_shape.dims], dtype=np.int32)
    else:
      return None
  elif tensor.op.type == "Rank":
    # Rank only needs the number of dimensions, not their sizes.
    input_shape = tensor.op.inputs[0].get_shape()
    if input_shape.ndims is not None:
      return np.array([input_shape.ndims], dtype=np.int32)
    else:
      return None
  elif tensor.op.type == "Range":
    # Evaluate start/limit/delta recursively; all three must be constant.
    start = constant_value(tensor.op.inputs[0])
    if start is None:
      return None
    limit = constant_value(tensor.op.inputs[1])
    if limit is None:
      return None
    delta = constant_value(tensor.op.inputs[2])
    if delta is None:
      return None
    return np.arange(start, limit, delta, dtype=tensor.dtype.as_numpy_dtype)
  elif tensor.op.type == "Cast":
    pre_cast = constant_value(tensor.op.inputs[0])
    if pre_cast is None:
      return None
    cast_dtype = dtypes.as_dtype(tensor.op.get_attr("DstT"))
    return pre_cast.astype(cast_dtype.as_numpy_dtype)
  elif tensor.op.type == "Concat":
    # inputs[0] is the concat axis; the remaining inputs are the tensors
    # to join, and every one must be constant.
    dim = constant_value(tensor.op.inputs[0])
    if dim is None:
      return None
    values = []
    for x in tensor.op.inputs[1:]:
      value = constant_value(x)
      if value is None:
        return None
      values.append(value)
    return np.concatenate(values, axis=dim)
  elif tensor.op.type == "Pack":
    values = []
    for x in tensor.op.inputs:
      value = constant_value(x)
      if value is None:
        return None
      values.append(value)
    return np.array(values)
  else:
    # Unknown op type: not statically evaluable.
    return None
def constant_value(tensor):
  """Returns the constant value of the given tensor, if efficiently calculable.

  Attempts to partially evaluate `tensor`; on success the value is returned
  as a numpy ndarray, otherwise None.

  TODO(mrry): Consider whether this function should use a registration
  mechanism like gradients and ShapeFunctions, so that it is easily
  extensible.

  NOTE: A non-None result means it will no longer be possible to feed a
  different value for `tensor`. This allows the result of this function to
  influence the graph that is constructed, and permits static shape
  optimizations.

  Args:
    tensor: The Tensor to be evaluated.

  Returns:
    A numpy ndarray containing the constant value of the given `tensor`,
    or None if it cannot be calculated.

  Raises:
    TypeError: if tensor is not an ops.Tensor.
  """
  result = _ConstantValue(tensor)
  if result is None:
    return None
  # The caller may now depend on the constant value, so conservatively
  # mark the tensor as unfeedable.
  tensor.graph.prevent_feeding(tensor)
  return result
def constant_value_as_shape(tensor): # pylint: disable=invalid-name
  """A version of `constant_value()` that returns a `TensorShape`.
  This version should be used when a constant tensor value is
  interpreted as a (possibly partial) shape, e.g. in the shape
  function for `tf.reshape()`. By explicitly requesting a
  `TensorShape` as the return value, it is possible to represent
  unknown dimensions; by contrast, `constant_value()` is
  all-or-nothing.
  Args:
    tensor: The rank-1 Tensor to be evaluated.
  Returns:
    A `TensorShape` based on the constant value of the given `tensor`.
  """
  shape = tensor.get_shape().with_rank(1)
  if tensor.get_shape() == [0]:
    # An empty vector of dimension sizes describes a scalar shape.
    return tensor_shape.scalar()
  elif tensor.op.type == "Shape":
    return tensor.op.inputs[0].get_shape()
  elif tensor.op.type == "Pack":
    ret = tensor_shape.scalar() # Empty list.
    for pack_input in tensor.op.inputs:
      # `pack_input` must be a scalar. Attempt to evaluate it, and append it
      # to `ret`. Negative or unknown values become unknown dimensions.
      pack_input_val = constant_value(pack_input)
      if pack_input_val is None or pack_input_val < 0:
        new_dim = tensor_shape.Dimension(None)
      else:
        new_dim = tensor_shape.Dimension(pack_input_val)
      ret = ret.concatenate([new_dim])
    return ret
  elif tensor.op.type == "Concat":
    # We assume that `tensor.op.inputs[0]` evaluates to 0, as this is
    # the only legal value when concatenating vectors, and it will
    # have been checked by a previous shape function.
    ret = tensor_shape.scalar() # Empty list.
    for concat_input in tensor.op.inputs[1:]:
      # `concat_input` must be a vector. Attempt to evaluate it as a shape,
      # and concatenate it with `ret`.
      ret = ret.concatenate(constant_value_as_shape(concat_input))
    return ret
  else:
    # Fallback: fully evaluate the tensor, treating -1 entries as
    # unknown dimensions.
    ret = tensor_shape.unknown_shape(shape[0].value)
    value = constant_value(tensor)
    if value is not None:
      ret = ret.merge_with(tensor_shape.TensorShape(
          [d if d != -1 else None for d in value]))
    return ret
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.util;
import org.apache.hadoop.util.DiskChecker.DiskErrorException;
import org.apache.hadoop.util.DiskChecker.FileIoProvider;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.Timeout;
import java.io.File;
import java.io.FileNotFoundException;
import java.io.FileOutputStream;
import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.attribute.PosixFilePermissions;
import java.util.concurrent.atomic.AtomicInteger;
import static org.junit.jupiter.api.Assertions.assertThrows;
import static org.junit.jupiter.api.Assertions.assertTrue;
/**
* Verify {@link DiskChecker} validation routines that perform
* Disk IO.
*/
@Timeout(30)
public final class TestDiskCheckerWithDiskIo {
/**
* Verify DiskChecker ignores at least 2 transient file creation errors.
*/
@Test
public final void testDiskIoIgnoresTransientCreateErrors() throws Throwable {
DiskChecker.replaceFileOutputStreamProvider(new TestFileIoProvider(
DiskChecker.DISK_IO_MAX_ITERATIONS - 1, 0));
checkDirs(true);
}
/**
* Verify DiskChecker bails after 3 file creation errors.
*/
@Test
public final void testDiskIoDetectsCreateErrors() throws Throwable {
assertThrows(DiskErrorException.class, () -> {
DiskChecker.replaceFileOutputStreamProvider(new TestFileIoProvider(
DiskChecker.DISK_IO_MAX_ITERATIONS, 0));
checkDirs(false);
});
}
/**
* Verify DiskChecker ignores at least 2 transient file write errors.
*/
@Test
public final void testDiskIoIgnoresTransientWriteErrors() throws Throwable {
DiskChecker.replaceFileOutputStreamProvider(new TestFileIoProvider(
0, DiskChecker.DISK_IO_MAX_ITERATIONS - 1));
checkDirs(true);
}
/**
* Verify DiskChecker bails after 3 file write errors.
*/
@Test
public final void testDiskIoDetectsWriteErrors() throws Throwable {
assertThrows(DiskErrorException.class, ()->{
DiskChecker.replaceFileOutputStreamProvider(new TestFileIoProvider(
0, DiskChecker.DISK_IO_MAX_ITERATIONS));
checkDirs(false);
});
}
/**
* Verify DiskChecker's test file naming scheme.
*/
@Test
public void testDiskIoFileNaming() {
final File rootDir = new File("/");
assertTrue(".001".matches("\\.00\\d$"));
for (int i = 1; i < DiskChecker.DISK_IO_MAX_ITERATIONS; ++i) {
final File file = DiskChecker.getFileNameForDiskIoCheck(rootDir, i);
assertTrue(file.toString().matches("^.*\\.[0-9]+$"),
"File name does not match expected pattern: " + file);
}
final File guidFile = DiskChecker.getFileNameForDiskIoCheck(
rootDir, DiskChecker.DISK_IO_MAX_ITERATIONS);
assertTrue(guidFile.toString().matches("^.*\\.[A-Za-z0-9-]+$"),
"File name does not match expected pattern: " + guidFile);
}
/**
* A dummy {@link DiskChecker#FileIoProvider} that can throw a programmable
* number of times.
*/
private static class TestFileIoProvider implements FileIoProvider {
private final AtomicInteger numCreateCalls = new AtomicInteger(0);
private final AtomicInteger numWriteCalls = new AtomicInteger(0);
private final int numTimesToThrowOnCreate;
private final int numTimesToThrowOnWrite;
public TestFileIoProvider(
int numTimesToThrowOnCreate, int numTimesToThrowOnWrite) {
this.numTimesToThrowOnCreate = numTimesToThrowOnCreate;
this.numTimesToThrowOnWrite = numTimesToThrowOnWrite;
}
/**
* {@inheritDoc}
*/
@Override
public FileOutputStream get(File f) throws FileNotFoundException {
if (numCreateCalls.getAndIncrement() < numTimesToThrowOnCreate) {
throw new FileNotFoundException("Dummy exception for testing");
}
// Can't mock final class FileOutputStream.
return new FileOutputStream(f);
}
/**
* {@inheritDoc}
*/
@Override
public void write(FileOutputStream fos, byte[] data) throws IOException {
if (numWriteCalls.getAndIncrement() < numTimesToThrowOnWrite) {
throw new IOException("Dummy exception for testing");
}
fos.write(data);
}
}
private void checkDirs(boolean success)
throws Throwable {
File localDir = createTempDir();
try {
DiskChecker.checkDirWithDiskIo(localDir);
} finally {
localDir.delete();
}
}
/**
* Create an empty directory with a random name under test directory
* with Posix permissions "0755".
*
* @return the created directory
* @throws java.io.IOException if any
*/
private File createTempDir() throws java.io.IOException {
final File testDir = new File(System.getProperty("test.build.data"));
return Files.createTempDirectory(testDir.toPath(), "test",
PosixFilePermissions.asFileAttribute(
PosixFilePermissions.fromString("rwxr-xr-x"))).toFile();
}
} | java | github | https://github.com/apache/hadoop | hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestDiskCheckerWithDiskIo.java |
# subrepo.py - sub-repository handling for Mercurial
#
# Copyright 2009-2010 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
from __future__ import absolute_import
import copy
import errno
import os
import posixpath
import re
import stat
import subprocess
import sys
import tarfile
import xml.dom.minidom
from .i18n import _
from . import (
cmdutil,
config,
error,
exchange,
match as matchmod,
node,
pathutil,
phases,
scmutil,
util,
)
# Module handle for mercurial.hg; populated lazily by subrepo() and
# nullsubrepo() below to avoid a circular import at load time.
hg = None
propertycache = util.propertycache
# (source, revision, kind) placeholder for a subrepo with no recorded state.
nullstate = ('', '', 'empty')
def _expandedabspath(path):
    '''
    get a path or url and if it is a path expand it and return an absolute path
    '''
    expanded = util.urllocalpath(util.expandpath(path))
    url = util.url(expanded)
    if url.scheme:
        # Real URLs (with a scheme) are passed through untouched.
        return path
    return util.normpath(os.path.abspath(url.path))
def _getstorehashcachename(remotepath):
    '''get a unique filename for the store hash cache of a remote repository'''
    digest = util.sha1(_expandedabspath(remotepath)).hexdigest()
    return digest[:12]
class SubrepoAbort(error.Abort):
    """Exception class used to avoid handling a subrepo error more than once"""
    def __init__(self, *args, **kw):
        # NOTE(review): the extra keywords ('subrepo', 'cause') are forwarded
        # to error.Abort unchanged — assumes Abort tolerates them; confirm.
        error.Abort.__init__(self, *args, **kw)
        self.subrepo = kw.get('subrepo')  # relative path of the failing subrepo
        self.cause = kw.get('cause')  # original sys.exc_info() triple, if any
def annotatesubrepoerror(func):
    """Decorator: annotate error.Abort raised by ``func`` with the subrepo
    path, wrapping it in SubrepoAbort so it is reported only once."""
    def decoratedmethod(self, *args, **kargs):
        try:
            res = func(self, *args, **kargs)
        except SubrepoAbort as ex:
            # This exception has already been handled
            raise ex
        except error.Abort as ex:
            subrepo = subrelpath(self)
            errormsg = str(ex) + ' ' + _('(in subrepo %s)') % subrepo
            # avoid handling this exception by raising a SubrepoAbort exception
            raise SubrepoAbort(errormsg, hint=ex.hint, subrepo=subrepo,
                               cause=sys.exc_info())
        return res
    return decoratedmethod
def state(ctx, ui):
    """return a state dict, mapping subrepo paths configured in .hgsub
    to tuple: (source from .hgsub, revision from .hgsubstate, kind
    (key in types dict))
    """
    p = config.config()
    repo = ctx.repo()
    def read(f, sections=None, remap=None):
        # Parse file ``f`` from the context into ``p``. ``read`` is handed
        # back to p.parse, presumably so includes are resolved recursively
        # — TODO confirm against config.config.parse.
        if f in ctx:
            try:
                data = ctx[f].data()
            except IOError as err:
                if err.errno != errno.ENOENT:
                    raise
                # handle missing subrepo spec files as removed
                ui.warn(_("warning: subrepo spec file \'%s\' not found\n") %
                        repo.pathto(f))
                return
            p.parse(f, data, sections, remap, read)
        else:
            raise error.Abort(_("subrepo spec file \'%s\' not found") %
                              repo.pathto(f))
    if '.hgsub' in ctx:
        read('.hgsub')
    # [subpaths] entries from the user's config provide rewrite rules
    # applied by remap() below.
    for path, src in ui.configitems('subpaths'):
        p.set('subpaths', path, src, ui.configsource('subpaths', path))
    rev = {}
    # .hgsubstate lines have the form "<revision> <path>".
    if '.hgsubstate' in ctx:
        try:
            for i, l in enumerate(ctx['.hgsubstate'].data().splitlines()):
                l = l.lstrip()
                if not l:
                    continue
                try:
                    revision, path = l.split(" ", 1)
                except ValueError:
                    raise error.Abort(_("invalid subrepository revision "
                                        "specifier in \'%s\' line %d")
                                      % (repo.pathto('.hgsubstate'), (i + 1)))
                rev[path] = revision
        except IOError as err:
            if err.errno != errno.ENOENT:
                raise
    def remap(src):
        # Apply each [subpaths] pattern as a single regex substitution.
        for pattern, repl in p.items('subpaths'):
            # Turn r'C:\foo\bar' into r'C:\\foo\\bar' since re.sub
            # does a string decode.
            repl = repl.encode('string-escape')
            # However, we still want to allow back references to go
            # through unharmed, so we turn r'\\1' into r'\1'. Again,
            # extra escapes are needed because re.sub string decodes.
            repl = re.sub(r'\\\\([0-9]+)', r'\\\1', repl)
            try:
                src = re.sub(pattern, repl, src, 1)
            except re.error as e:
                raise error.Abort(_("bad subrepository pattern in %s: %s")
                                  % (p.source('subpaths', pattern), e))
        return src
    state = {}
    for path, src in p[''].items():
        kind = 'hg'
        # A source of the form "[kind]source" selects a non-hg subrepo type.
        if src.startswith('['):
            if ']' not in src:
                raise error.Abort(_('missing ] in subrepo source'))
            kind, src = src.split(']', 1)
            kind = kind[1:]
            src = src.lstrip() # strip any extra whitespace after ']'
        if not util.url(src).isabs():
            parent = _abssource(repo, abort=False)
            if parent:
                parent = util.url(parent)
                parent.path = posixpath.join(parent.path or '', src)
                parent.path = posixpath.normpath(parent.path)
                joined = str(parent)
                # Remap the full joined path and use it if it changes,
                # else remap the original source.
                remapped = remap(joined)
                if remapped == joined:
                    src = remap(src)
                else:
                    src = remapped
        # NOTE(review): this unconditional remap() re-applies [subpaths]
        # rules to a source that may already have been remapped above —
        # looks like a possible double substitution; confirm intent.
        src = remap(src)
        state[util.pconvert(path)] = (src.strip(), rev.get(path, ''), kind)
    return state
def writestate(repo, state):
    """rewrite .hgsubstate in (outer) repo with these subrepo states"""
    lines = []
    for subpath in sorted(state):
        revision = state[subpath][1]
        # Subrepos still at the null revision are omitted from the file.
        if revision != nullstate[1]:
            lines.append('%s %s\n' % (revision, subpath))
    repo.wwrite('.hgsubstate', ''.join(lines), '')
def submerge(repo, wctx, mctx, actx, overwrite):
    """delegated from merge.applyupdates: merging of .hgsubstate file
    in working context, merging context and ancestor context

    States are (source, revision, kind) tuples; ``sm`` accumulates the
    merged result, which is written back to .hgsubstate at the end."""
    if mctx == actx: # backwards?
        actx = wctx.p1()
    s1 = wctx.substate
    s2 = mctx.substate
    sa = actx.substate
    sm = {}
    repo.ui.debug("subrepo merge %s %s %s\n" % (wctx, mctx, actx))
    def debug(s, msg, r=""):
        if r:
            r = "%s:%s:%s" % r
        repo.ui.debug("  subrepo %s: %s %s\n" % (s, msg, r))
    # First pass: everything present locally.
    for s, l in sorted(s1.iteritems()):
        a = sa.get(s, nullstate)
        ld = l # local state with possible dirty flag for compares
        if wctx.sub(s).dirty():
            ld = (l[0], l[1] + "+")
        if wctx == actx: # overwrite
            a = ld
        if s in s2:
            r = s2[s]
            if ld == r or r == a: # no change or local is newer
                sm[s] = l
                continue
            elif ld == a: # other side changed
                debug(s, "other changed, get", r)
                wctx.sub(s).get(r, overwrite)
                sm[s] = r
            elif ld[0] != r[0]: # sources differ
                # NOTE(review): when the user keeps the local source
                # (promptchoice returns 0) sm[s] is never set, so the
                # entry is dropped from the merged .hgsubstate — confirm
                # this is intended.
                if repo.ui.promptchoice(
                    _(' subrepository sources for %s differ\n'
                      'use (l)ocal source (%s) or (r)emote source (%s)?'
                      '$$ &Local $$ &Remote') % (s, l[0], r[0]), 0):
                    debug(s, "prompt changed, get", r)
                    wctx.sub(s).get(r, overwrite)
                    sm[s] = r
            elif ld[1] == a[1]: # local side is unchanged
                debug(s, "other side changed, get", r)
                wctx.sub(s).get(r, overwrite)
                sm[s] = r
            else:
                # Both revisions changed: let the user merge or pick a side.
                debug(s, "both sides changed")
                srepo = wctx.sub(s)
                option = repo.ui.promptchoice(
                    _(' subrepository %s diverged (local revision: %s, '
                      'remote revision: %s)\n'
                      '(M)erge, keep (l)ocal or keep (r)emote?'
                      '$$ &Merge $$ &Local $$ &Remote')
                    % (s, srepo.shortid(l[1]), srepo.shortid(r[1])), 0)
                if option == 0:
                    wctx.sub(s).merge(r)
                    sm[s] = l
                    debug(s, "merge with", r)
                elif option == 1:
                    sm[s] = l
                    debug(s, "keep local subrepo revision", l)
                else:
                    wctx.sub(s).get(r, overwrite)
                    sm[s] = r
                    debug(s, "get remote subrepo revision", r)
        elif ld == a: # remote removed, local unchanged
            debug(s, "remote removed, remove")
            wctx.sub(s).remove()
        elif a == nullstate: # not present in remote or ancestor
            debug(s, "local added, keep")
            sm[s] = l
            continue
        else:
            # Local changes vs. remote removal: ask before deleting.
            if repo.ui.promptchoice(
                _(' local changed subrepository %s which remote removed\n'
                  'use (c)hanged version or (d)elete?'
                  '$$ &Changed $$ &Delete') % s, 0):
                debug(s, "prompt remove")
                wctx.sub(s).remove()
    # Second pass: subrepos that exist only on the remote side.
    for s, r in sorted(s2.items()):
        if s in s1:
            continue
        elif s not in sa:
            debug(s, "remote added, get", r)
            mctx.sub(s).get(r)
            sm[s] = r
        elif r != sa[s]:
            # Remote changed a subrepo the local side removed: ask.
            if repo.ui.promptchoice(
                _(' remote changed subrepository %s which local removed\n'
                  'use (c)hanged version or (d)elete?'
                  '$$ &Changed $$ &Delete') % s, 0) == 0:
                debug(s, "prompt recreate", r)
                mctx.sub(s).get(r)
                sm[s] = r
    # record merged .hgsubstate
    writestate(repo, sm)
    return sm
def _updateprompt(ui, sub, dirty, local, remote):
    """Ask the user to choose between the local and remote subrepo source.

    Returns the promptchoice() index (0 = local, the default)."""
    if dirty:
        template = _(' subrepository sources for %s differ\n'
                     'use (l)ocal source (%s) or (r)emote source (%s)?'
                     '$$ &Local $$ &Remote')
    else:
        template = _(' subrepository sources for %s differ (in checked out '
                     'version)\n'
                     'use (l)ocal source (%s) or (r)emote source (%s)?'
                     '$$ &Local $$ &Remote')
    return ui.promptchoice(template % (subrelpath(sub), local, remote), 0)
def reporelpath(repo):
    """return path to this (sub)repo as seen from outermost repo"""
    outer = repo
    # Climb the _subparent chain to reach the outermost repository.
    while util.safehasattr(outer, '_subparent'):
        outer = outer._subparent
    prefix = pathutil.normasprefix(outer.root)
    return repo.root[len(prefix):]
def subrelpath(sub):
    """return path to this subrepo as seen from outermost repo"""
    # NOTE(review): relies on the subrepo object exposing _relpath
    # (set by the concrete subrepo classes) — not derivable here.
    return sub._relpath
def _abssource(repo, push=False, abort=True):
    """return pull/push path of repo - either based on parent repo .hgsub info
    or on the top repo config. Abort or return None if no source found."""
    if util.safehasattr(repo, '_subparent'):
        # This is a subrepo: resolve its source relative to the parent's
        # own source, recursing until the top repository is reached.
        source = util.url(repo._subsource)
        if source.isabs():
            return str(source)
        source.path = posixpath.normpath(source.path)
        parent = _abssource(repo._subparent, push, abort=False)
        if parent:
            parent = util.url(util.pconvert(parent))
            parent.path = posixpath.join(parent.path or '', source.path)
            parent.path = posixpath.normpath(parent.path)
            return str(parent)
    else: # recursion reached top repo
        if util.safehasattr(repo, '_subtoppath'):
            # Explicit override set during push/pull of the outer repo.
            return repo._subtoppath
        if push and repo.ui.config('paths', 'default-push'):
            return repo.ui.config('paths', 'default-push')
        if repo.ui.config('paths', 'default'):
            return repo.ui.config('paths', 'default')
        if repo.shared():
            # chop off the .hg component to get the default path form
            return os.path.dirname(repo.sharedpath)
    if abort:
        raise error.Abort(_("default path for subrepository not found"))
def _sanitize(ui, vfs, ignore):
    """Walk ``vfs``, skipping ``ignore`` directories and deleting any hgrc
    files found inside .hg directories (they could carry hostile config)."""
    for dirname, dirs, names in vfs.walk():
        for idx, entry in enumerate(dirs):
            if entry.lower() == ignore:
                # Remove in place; like os.walk, mutating the dirs list
                # presumably keeps the walk from descending into it.
                del dirs[idx]
                break
        if vfs.basename(dirname).lower() != '.hg':
            continue
        for name in names:
            if name.lower() == 'hgrc':
                ui.warn(_("warning: removing potentially hostile 'hgrc' "
                          "in '%s'\n") % vfs.join(dirname))
                vfs.unlink(vfs.reljoin(dirname, name))
def subrepo(ctx, path, allowwdir=False):
    """return instance of the right subrepo class for subrepo in path"""
    # subrepo inherently violates our import layering rules
    # because it wants to make repo objects from deep inside the stack
    # so we manually delay the circular imports to not break
    # scripts that don't use our demand-loading
    global hg
    from . import hg as h
    hg = h
    # Refuse paths that escape the repository root.
    pathutil.pathauditor(ctx.repo().root)(path)
    state = ctx.substate[path]
    # `types` maps a kind name to its subrepo class (defined later in
    # this module, past the abstractsubrepo definitions).
    if state[2] not in types:
        raise error.Abort(_('unknown subrepo type %s') % state[2])
    if allowwdir:
        # Substitute the working-directory revision for the recorded one.
        state = (state[0], ctx.subrev(path), state[2])
    return types[state[2]](ctx, path, state[:2])
def nullsubrepo(ctx, path, pctx):
    """return an empty subrepo in pctx for the extant subrepo in ctx"""
    # subrepo inherently violates our import layering rules
    # because it wants to make repo objects from deep inside the stack
    # so we manually delay the circular imports to not break
    # scripts that don't use our demand-loading
    global hg
    from . import hg as h
    hg = h
    # Refuse paths that escape the repository root.
    pathutil.pathauditor(ctx.repo().root)(path)
    state = ctx.substate[path]
    if state[2] not in types:
        raise error.Abort(_('unknown subrepo type %s') % state[2])
    subrev = ''
    if state[2] == 'hg':
        # hg subrepos use the 40-zero null revision as "empty".
        subrev = "0" * 40
    return types[state[2]](pctx, path, (state[0], subrev))
def newcommitphase(ui, ctx):
    """Return the phase for a new commit of ``ctx``, honoring subrepo phases.

    Depending on ``phases.checksubrepos`` (``ignore``, ``follow`` or
    ``abort``), the configured new-commit phase may be bumped up to the
    highest phase used by any subrepository, or the commit refused."""
    commitphase = phases.newcommitphase(ui)
    substate = getattr(ctx, "substate", None)
    if not substate:
        # No subrepos: the configured phase applies unchanged.
        return commitphase
    check = ui.config('phases', 'checksubrepos', 'follow')
    if check not in ('ignore', 'follow', 'abort'):
        raise error.Abort(_('invalid phases.checksubrepos configuration: %s')
                          % (check))
    if check == 'ignore':
        return commitphase
    maxphase = phases.public
    maxsub = None
    # Find the numerically highest (presumably least-public) phase used
    # by any subrepository.
    for s in sorted(substate):
        sub = ctx.sub(s)
        subphase = sub.phase(substate[s][1])
        if maxphase < subphase:
            maxphase = subphase
            maxsub = s
    if commitphase < maxphase:
        if check == 'abort':
            raise error.Abort(_("can't commit in %s phase"
                                " conflicting %s from subrepository %s") %
                              (phases.phasenames[commitphase],
                               phases.phasenames[maxphase], maxsub))
        # 'follow': warn and adopt the subrepo's phase.
        ui.warn(_("warning: changes are committed in"
                  " %s phase from subrepository %s\n") %
                (phases.phasenames[maxphase], maxsub))
        return maxphase
    return commitphase
# subrepo classes need to implement the following abstract class:

class abstractsubrepo(object):
    """Base class for all subrepository flavors (hg, svn, git).

    Subclasses must implement the methods that raise NotImplementedError;
    the remaining defaults are safe no-ops or benign fallbacks for
    operations a backend may not support.
    """

    def __init__(self, ctx, path):
        """Initialize abstractsubrepo part

        ``ctx`` is the context referring this subrepository in the
        parent repository.

        ``path`` is the path to this subrepository as seen from
        innermost repository.
        """
        self.ui = ctx.repo().ui
        self._ctx = ctx
        self._path = path

    def storeclean(self, path):
        """
        returns true if the repository has not changed since it was last
        cloned from or pushed to a given repository.
        """
        # conservative default: assume the store may have changed
        return False

    def dirty(self, ignoreupdate=False):
        """returns true if the dirstate of the subrepo is dirty or does not
        match current stored state. If ignoreupdate is true, only check
        whether the subrepo has uncommitted changes in its dirstate.
        """
        raise NotImplementedError

    def dirtyreason(self, ignoreupdate=False):
        """return reason string if it is ``dirty()``

        Returned string should have enough information for the message
        of exception.

        This returns None, otherwise.
        """
        if self.dirty(ignoreupdate=ignoreupdate):
            return _("uncommitted changes in subrepository '%s'"
                     ) % subrelpath(self)

    def bailifchanged(self, ignoreupdate=False):
        """raise Abort if subrepository is ``dirty()``
        """
        dirtyreason = self.dirtyreason(ignoreupdate=ignoreupdate)
        if dirtyreason:
            raise error.Abort(dirtyreason)

    def basestate(self):
        """current working directory base state, disregarding .hgsubstate
        state and working directory modifications"""
        raise NotImplementedError

    def checknested(self, path):
        """check if path is a subrepository within this repository"""
        return False

    def commit(self, text, user, date):
        """commit the current changes to the subrepo with the given
        log message. Use given user and date if possible. Return the
        new state of the subrepo.
        """
        raise NotImplementedError

    def phase(self, state):
        """returns phase of specified state in the subrepository.
        """
        # non-hg backends have no phase concept; treat as public
        return phases.public

    def remove(self):
        """remove the subrepo

        (should verify the dirstate is not dirty first)
        """
        raise NotImplementedError

    def get(self, state, overwrite=False):
        """run whatever commands are needed to put the subrepo into
        this state
        """
        raise NotImplementedError

    def merge(self, state):
        """merge currently-saved state with the new state."""
        raise NotImplementedError

    def push(self, opts):
        """perform whatever action is analogous to 'hg push'

        This may be a no-op on some systems.
        """
        raise NotImplementedError

    def add(self, ui, match, prefix, explicitonly, **opts):
        """add matched files; return list of files that failed to be added"""
        return []

    def addremove(self, matcher, prefix, opts, dry_run, similarity):
        # terminate the warning with a newline so it does not run into
        # subsequent terminal output (ui.warn does not append one; compare
        # revert() below)
        self.ui.warn("%s: %s\n" % (prefix, _("addremove is not supported")))
        return 1

    def cat(self, match, prefix, **opts):
        """print file data; 1 signals "not supported" to the caller"""
        return 1

    def status(self, rev2, **opts):
        # empty status by default: unsupported backends report no changes
        return scmutil.status([], [], [], [], [], [], [])

    def diff(self, ui, diffopts, node2, match, prefix, **opts):
        pass

    def outgoing(self, ui, dest, opts):
        return 1

    def incoming(self, ui, source, opts):
        return 1

    def files(self):
        """return filename iterator"""
        raise NotImplementedError

    def filedata(self, name):
        """return file data"""
        raise NotImplementedError

    def fileflags(self, name):
        """return file flags ('x' executable, 'l' symlink, '' plain)"""
        return ''

    def getfileset(self, expr):
        """Resolve the fileset expression for this repo"""
        return set()

    def printfiles(self, ui, m, fm, fmt, subrepos):
        """handle the files command for this subrepo"""
        return 1

    def archive(self, archiver, prefix, match=None):
        """add this subrepo's files to an archive; return the file count"""
        if match is not None:
            files = [f for f in self.files() if match(f)]
        else:
            files = self.files()
        total = len(files)
        relpath = subrelpath(self)
        self.ui.progress(_('archiving (%s)') % relpath, 0,
                         unit=_('files'), total=total)
        for i, name in enumerate(files):
            flags = self.fileflags(name)
            # executable files get 0755, everything else 0644
            mode = 'x' in flags and 0o755 or 0o644
            symlink = 'l' in flags
            archiver.addfile(prefix + self._path + '/' + name,
                             mode, symlink, self.filedata(name))
            self.ui.progress(_('archiving (%s)') % relpath, i + 1,
                             unit=_('files'), total=total)
        self.ui.progress(_('archiving (%s)') % relpath, None)
        return total

    def walk(self, match):
        '''
        walk recursively through the directory tree, finding all files
        matched by the match function
        '''
        pass

    def forget(self, match, prefix):
        # (forgotten, failed) - nothing forgotten by default
        return ([], [])

    def removefiles(self, matcher, prefix, after, force, subrepos):
        """remove the matched files from the subrepository and the filesystem,
        possibly by force and/or after the file has been removed from the
        filesystem.  Return 0 on success, 1 on any warning.
        """
        return 1

    def revert(self, substate, *pats, **opts):
        self.ui.warn('%s: reverting %s subrepos is unsupported\n' \
                     % (substate[0], substate[2]))
        return []

    def shortid(self, revid):
        # backends with long ids (hg) override this to abbreviate
        return revid

    def verify(self):
        '''verify the integrity of the repository.  Return 0 on success or
        warning, 1 on any error.
        '''
        return 0

    @propertycache
    def wvfs(self):
        """return vfs to access the working directory of this subrepository
        """
        return scmutil.vfs(self._ctx.repo().wvfs.join(self._path))

    @propertycache
    def _relpath(self):
        """return path to this subrepository as seen from outermost repository
        """
        return self.wvfs.reljoin(reporelpath(self._ctx.repo()), self._path)
class hgsubrepo(abstractsubrepo):
    """Mercurial-backed subrepository."""

    def __init__(self, ctx, path, state):
        super(hgsubrepo, self).__init__(ctx, path)
        self._state = state
        r = ctx.repo()
        root = r.wjoin(path)
        # create the nested repo on disk only if it does not exist yet
        create = not r.wvfs.exists('%s/.hg' % path)
        self._repo = hg.repository(r.baseui, root, create=create)

        # Propagate the parent's --hidden option
        if r is r.unfiltered():
            self._repo = self._repo.unfiltered()

        self.ui = self._repo.ui
        # propagate selected config items from the parent ui
        for s, k in [('ui', 'commitsubrepos')]:
            v = r.ui.config(s, k)
            if v:
                self.ui.setconfig(s, k, v, 'subrepo')
        # internal config: ui._usedassubrepo
        self.ui.setconfig('ui', '_usedassubrepo', 'True', 'subrepo')
        self._initrepo(r, state[0], create)

    def storeclean(self, path):
        # lock so the store hash cannot change under our feet
        with self._repo.lock():
            return self._storeclean(path)

    def _storeclean(self, path):
        """compare the cached store hash for ``path`` against the current one"""
        clean = True
        itercache = self._calcstorehash(path)
        for filehash in self._readstorehashcache(path):
            if filehash != next(itercache, None):
                clean = False
                break
        if clean:
            # if not empty:
            # the cached and current pull states have a different size
            clean = next(itercache, None) is None
        return clean

    def _calcstorehash(self, remotepath):
        '''calculate a unique "store hash"

        This method is used to to detect when there are changes that may
        require a push to a given remote path.'''
        # sort the files that will be hashed in increasing (likely) file size
        filelist = ('bookmarks', 'store/phaseroots', 'store/00changelog.i')
        yield '# %s\n' % _expandedabspath(remotepath)
        vfs = self._repo.vfs
        for relname in filelist:
            filehash = util.sha1(vfs.tryread(relname)).hexdigest()
            yield '%s = %s\n' % (relname, filehash)

    @propertycache
    def _cachestorehashvfs(self):
        # vfs rooted at .hg/cache/storehash
        return scmutil.vfs(self._repo.join('cache/storehash'))

    def _readstorehashcache(self, remotepath):
        '''read the store hash cache for a given remote repository'''
        cachefile = _getstorehashcachename(remotepath)
        return self._cachestorehashvfs.tryreadlines(cachefile, 'r')

    def _cachestorehash(self, remotepath):
        '''cache the current store hash

        Each remote repo requires its own store hash cache, because a subrepo
        store may be "clean" versus a given remote repo, but not versus another
        '''
        cachefile = _getstorehashcachename(remotepath)
        with self._repo.lock():
            storehash = list(self._calcstorehash(remotepath))
            vfs = self._cachestorehashvfs
            vfs.writelines(cachefile, storehash, mode='w', notindexed=True)

    def _getctx(self):
        '''fetch the context for this subrepo revision, possibly a workingctx
        '''
        if self._ctx.rev() is None:
            return self._repo[None] # workingctx if parent is workingctx
        else:
            rev = self._state[1]
            return self._repo[rev]

    @annotatesubrepoerror
    def _initrepo(self, parentrepo, source, create):
        """link the subrepo to its parent and, on creation, write an hgrc
        with default/default-push paths derived from the parent"""
        self._repo._subparent = parentrepo
        self._repo._subsource = source

        if create:
            lines = ['[paths]\n']

            def addpathconfig(key, value):
                if value:
                    lines.append('%s = %s\n' % (key, value))
                    self.ui.setconfig('paths', key, value, 'subrepo')

            defpath = _abssource(self._repo, abort=False)
            defpushpath = _abssource(self._repo, True, abort=False)
            addpathconfig('default', defpath)
            if defpath != defpushpath:
                addpathconfig('default-push', defpushpath)
            fp = self._repo.vfs("hgrc", "w", text=True)
            try:
                fp.write(''.join(lines))
            finally:
                fp.close()

    @annotatesubrepoerror
    def add(self, ui, match, prefix, explicitonly, **opts):
        return cmdutil.add(ui, self._repo, match,
                           self.wvfs.reljoin(prefix, self._path),
                           explicitonly, **opts)

    @annotatesubrepoerror
    def addremove(self, m, prefix, opts, dry_run, similarity):
        # In the same way as sub directories are processed, once in a subrepo,
        # always entry any of its subrepos. Don't corrupt the options that will
        # be used to process sibling subrepos however.
        opts = copy.copy(opts)
        opts['subrepos'] = True
        return scmutil.addremove(self._repo, m,
                                 self.wvfs.reljoin(prefix, self._path), opts,
                                 dry_run, similarity)

    @annotatesubrepoerror
    def cat(self, match, prefix, **opts):
        rev = self._state[1]
        ctx = self._repo[rev]
        return cmdutil.cat(self.ui, self._repo, ctx, match, prefix, **opts)

    @annotatesubrepoerror
    def status(self, rev2, **opts):
        try:
            rev1 = self._state[1]
            ctx1 = self._repo[rev1]
            ctx2 = self._repo[rev2]
            return self._repo.status(ctx1, ctx2, **opts)
        except error.RepoLookupError as inst:
            # a missing subrepo revision is reported, not fatal
            self.ui.warn(_('warning: error "%s" in subrepository "%s"\n')
                         % (inst, subrelpath(self)))
            return scmutil.status([], [], [], [], [], [], [])

    @annotatesubrepoerror
    def diff(self, ui, diffopts, node2, match, prefix, **opts):
        try:
            node1 = node.bin(self._state[1])
            # We currently expect node2 to come from substate and be
            # in hex format
            if node2 is not None:
                node2 = node.bin(node2)
            cmdutil.diffordiffstat(ui, self._repo, diffopts,
                                   node1, node2, match,
                                   prefix=posixpath.join(prefix, self._path),
                                   listsubrepos=True, **opts)
        except error.RepoLookupError as inst:
            self.ui.warn(_('warning: error "%s" in subrepository "%s"\n')
                         % (inst, subrelpath(self)))

    @annotatesubrepoerror
    def archive(self, archiver, prefix, match=None):
        # make sure the recorded revision is present before archiving
        self._get(self._state + ('hg',))
        total = abstractsubrepo.archive(self, archiver, prefix, match)
        rev = self._state[1]
        ctx = self._repo[rev]
        # recurse into nested subrepos
        for subpath in ctx.substate:
            s = subrepo(ctx, subpath, True)
            submatch = matchmod.narrowmatcher(subpath, match)
            total += s.archive(archiver, prefix + self._path + '/', submatch)
        return total

    @annotatesubrepoerror
    def dirty(self, ignoreupdate=False):
        r = self._state[1]
        if r == '' and not ignoreupdate: # no state recorded
            return True
        w = self._repo[None]
        if r != w.p1().hex() and not ignoreupdate:
            # different version checked out
            return True
        return w.dirty() # working directory changed

    def basestate(self):
        return self._repo['.'].hex()

    def checknested(self, path):
        return self._repo._checknested(self._repo.wjoin(path))

    @annotatesubrepoerror
    def commit(self, text, user, date):
        # don't bother committing in the subrepo if it's only been
        # updated
        if not self.dirty(True):
            return self._repo['.'].hex()
        self.ui.debug("committing subrepo %s\n" % subrelpath(self))
        n = self._repo.commit(text, user, date)
        if not n:
            return self._repo['.'].hex() # different version checked out
        return node.hex(n)

    @annotatesubrepoerror
    def phase(self, state):
        return self._repo[state].phase()

    @annotatesubrepoerror
    def remove(self):
        # we can't fully delete the repository as it may contain
        # local-only history
        self.ui.note(_('removing subrepo %s\n') % subrelpath(self))
        hg.clean(self._repo, node.nullid, False)

    def _get(self, state):
        """make ``state``'s revision available locally, cloning or pulling
        as needed; return True if the revision was already present"""
        source, revision, kind = state
        if revision in self._repo.unfiltered():
            return True
        self._repo._subsource = source
        srcurl = _abssource(self._repo)
        other = hg.peer(self._repo, {}, srcurl)
        if len(self._repo) == 0:
            # empty repo: replace it with a full clone of the source
            self.ui.status(_('cloning subrepo %s from %s\n')
                           % (subrelpath(self), srcurl))
            parentrepo = self._repo._subparent
            # use self._repo.vfs instead of self.wvfs to remove .hg only
            self._repo.vfs.rmtree()
            other, cloned = hg.clone(self._repo._subparent.baseui, {},
                                     other, self._repo.root,
                                     update=False)
            self._repo = cloned.local()
            self._initrepo(parentrepo, source, create=True)
            self._cachestorehash(srcurl)
        else:
            self.ui.status(_('pulling subrepo %s from %s\n')
                           % (subrelpath(self), srcurl))
            cleansub = self.storeclean(srcurl)
            exchange.pull(self._repo, other)
            if cleansub:
                # keep the repo clean after pull
                self._cachestorehash(srcurl)
        return False

    @annotatesubrepoerror
    def get(self, state, overwrite=False):
        inrepo = self._get(state)
        source, revision, kind = state
        repo = self._repo
        repo.ui.debug("getting subrepo %s\n" % self._path)
        if inrepo:
            # warn (and use the unfiltered repo) when updating to a
            # revision that is hidden in the subrepo
            urepo = repo.unfiltered()
            ctx = urepo[revision]
            if ctx.hidden():
                urepo.ui.warn(
                    _('revision %s in subrepo %s is hidden\n') \
                    % (revision[0:12], self._path))
                repo = urepo
        hg.updaterepo(repo, revision, overwrite)

    @annotatesubrepoerror
    def merge(self, state):
        self._get(state)
        cur = self._repo['.']
        dst = self._repo[state[1]]
        anc = dst.ancestor(cur)

        def mergefunc():
            # fast-forward update when possible, otherwise a real merge
            if anc == cur and dst.branch() == cur.branch():
                self.ui.debug("updating subrepo %s\n" % subrelpath(self))
                hg.update(self._repo, state[1])
            elif anc == dst:
                self.ui.debug("skipping subrepo %s\n" % subrelpath(self))
            else:
                self.ui.debug("merging subrepo %s\n" % subrelpath(self))
                hg.merge(self._repo, state[1], remind=False)

        wctx = self._repo[None]
        if self.dirty():
            if anc != dst:
                # prompt before discarding local changes
                if _updateprompt(self.ui, self, wctx.dirty(), cur, dst):
                    mergefunc()
            else:
                mergefunc()
        else:
            mergefunc()

    @annotatesubrepoerror
    def push(self, opts):
        force = opts.get('force')
        newbranch = opts.get('new_branch')
        ssh = opts.get('ssh')

        # push subrepos depth-first for coherent ordering
        c = self._repo['']
        subs = c.substate # only repos that are committed
        for s in sorted(subs):
            if c.sub(s).push(opts) == 0:
                return False

        dsturl = _abssource(self._repo, True)
        if not force:
            if self.storeclean(dsturl):
                self.ui.status(
                    _('no changes made to subrepo %s since last push to %s\n')
                    % (subrelpath(self), dsturl))
                return None
        self.ui.status(_('pushing subrepo %s to %s\n') %
                       (subrelpath(self), dsturl))
        other = hg.peer(self._repo, {'ssh': ssh}, dsturl)
        res = exchange.push(self._repo, other, force, newbranch=newbranch)

        # the repo is now clean
        self._cachestorehash(dsturl)
        return res.cgresult

    @annotatesubrepoerror
    def outgoing(self, ui, dest, opts):
        # rev/branch options refer to the parent repo, not the subrepo
        if 'rev' in opts or 'branch' in opts:
            opts = copy.copy(opts)
            opts.pop('rev', None)
            opts.pop('branch', None)
        return hg.outgoing(ui, self._repo, _abssource(self._repo, True), opts)

    @annotatesubrepoerror
    def incoming(self, ui, source, opts):
        # rev/branch options refer to the parent repo, not the subrepo
        if 'rev' in opts or 'branch' in opts:
            opts = copy.copy(opts)
            opts.pop('rev', None)
            opts.pop('branch', None)
        return hg.incoming(ui, self._repo, _abssource(self._repo, False), opts)

    @annotatesubrepoerror
    def files(self):
        rev = self._state[1]
        ctx = self._repo[rev]
        return ctx.manifest().keys()

    def filedata(self, name):
        rev = self._state[1]
        return self._repo[rev][name].data()

    def fileflags(self, name):
        rev = self._state[1]
        ctx = self._repo[rev]
        return ctx.flags(name)

    @annotatesubrepoerror
    def printfiles(self, ui, m, fm, fmt, subrepos):
        # If the parent context is a workingctx, use the workingctx here for
        # consistency.
        if self._ctx.rev() is None:
            ctx = self._repo[None]
        else:
            rev = self._state[1]
            ctx = self._repo[rev]
        return cmdutil.files(ui, ctx, m, fm, fmt, subrepos)

    @annotatesubrepoerror
    def getfileset(self, expr):
        if self._ctx.rev() is None:
            ctx = self._repo[None]
        else:
            rev = self._state[1]
            ctx = self._repo[rev]

        files = ctx.getfileset(expr)

        # include matches from nested subrepos, prefixed with their path
        for subpath in ctx.substate:
            sub = ctx.sub(subpath)

            try:
                files.extend(subpath + '/' + f for f in sub.getfileset(expr))
            except error.LookupError:
                self.ui.status(_("skipping missing subrepository: %s\n")
                               % self.wvfs.reljoin(reporelpath(self), subpath))
        return files

    def walk(self, match):
        ctx = self._repo[None]
        return ctx.walk(match)

    @annotatesubrepoerror
    def forget(self, match, prefix):
        return cmdutil.forget(self.ui, self._repo, match,
                              self.wvfs.reljoin(prefix, self._path), True)

    @annotatesubrepoerror
    def removefiles(self, matcher, prefix, after, force, subrepos):
        return cmdutil.remove(self.ui, self._repo, matcher,
                              self.wvfs.reljoin(prefix, self._path),
                              after, force, subrepos)

    @annotatesubrepoerror
    def revert(self, substate, *pats, **opts):
        # reverting a subrepo is a 2 step process:
        # 1. if the no_backup is not set, revert all modified
        #    files inside the subrepo
        # 2. update the subrepo to the revision specified in
        #    the corresponding substate dictionary
        self.ui.status(_('reverting subrepo %s\n') % substate[0])
        if not opts.get('no_backup'):
            # Revert all files on the subrepo, creating backups
            # Note that this will not recursively revert subrepos
            # We could do it if there was a set:subrepos() predicate
            opts = opts.copy()
            opts['date'] = None
            opts['rev'] = substate[1]

            self.filerevert(*pats, **opts)

        # Update the repo to the revision specified in the given substate
        if not opts.get('dry_run'):
            self.get(substate, overwrite=True)

    def filerevert(self, *pats, **opts):
        ctx = self._repo[opts['rev']]
        parents = self._repo.dirstate.parents()
        if opts.get('all'):
            pats = ['set:modified()']
        else:
            pats = []
        cmdutil.revert(self.ui, self._repo, ctx, parents, *pats, **opts)

    def shortid(self, revid):
        # abbreviate the 40-char hex node to the usual 12 characters
        return revid[:12]

    def verify(self):
        try:
            rev = self._state[1]
            ctx = self._repo.unfiltered()[rev]
            if ctx.hidden():
                # Since hidden revisions aren't pushed/pulled, it seems worth an
                # explicit warning.
                ui = self._repo.ui
                ui.warn(_("subrepo '%s' is hidden in revision %s\n") %
                        (self._relpath, node.short(self._ctx.node())))
            return 0
        except error.RepoLookupError:
            # A missing subrepo revision may be a case of needing to pull it, so
            # don't treat this as an error.
            self._repo.ui.warn(_("subrepo '%s' not found in revision %s\n") %
                               (self._relpath, node.short(self._ctx.node())))
            return 0

    @propertycache
    def wvfs(self):
        """return own wvfs for efficiency and consistency
        """
        return self._repo.wvfs

    @propertycache
    def _relpath(self):
        """return path to this subrepository as seen from outermost repository
        """
        # Keep consistent dir separators by avoiding vfs.join(self._path)
        return reporelpath(self._repo)
class svnsubrepo(abstractsubrepo):
    """Subversion-backed subrepository, driven via the ``svn`` executable."""

    def __init__(self, ctx, path, state):
        super(svnsubrepo, self).__init__(ctx, path)
        self._state = state
        self._exe = util.findexe('svn')
        if not self._exe:
            raise error.Abort(_("'svn' executable not found for subrepo '%s'")
                              % self._path)

    def _svncommand(self, commands, filename='', failok=False):
        """run an svn command and return (stdout, stderr)

        With ``failok`` a non-zero exit status does not abort.  The
        environment is adjusted so svn output stays unlocalized.
        """
        cmd = [self._exe]
        extrakw = {}
        if not self.ui.interactive():
            # Making stdin be a pipe should prevent svn from behaving
            # interactively even if we can't pass --non-interactive.
            extrakw['stdin'] = subprocess.PIPE
            # Starting in svn 1.5 --non-interactive is a global flag
            # instead of being per-command, but we need to support 1.4 so
            # we have to be intelligent about what commands take
            # --non-interactive.
            if commands[0] in ('update', 'checkout', 'commit'):
                cmd.append('--non-interactive')
        cmd.extend(commands)
        if filename is not None:
            path = self.wvfs.reljoin(self._ctx.repo().origroot,
                                     self._path, filename)
            cmd.append(path)
        env = dict(os.environ)
        # Avoid localized output, preserve current locale for everything else.
        lc_all = env.get('LC_ALL')
        if lc_all:
            env['LANG'] = lc_all
            del env['LC_ALL']
        env['LC_MESSAGES'] = 'C'
        p = subprocess.Popen(cmd, bufsize=-1, close_fds=util.closefds,
                             stdout=subprocess.PIPE, stderr=subprocess.PIPE,
                             universal_newlines=True, env=env, **extrakw)
        stdout, stderr = p.communicate()
        stderr = stderr.strip()
        if not failok:
            if p.returncode:
                raise error.Abort(stderr or 'exited with code %d'
                                  % p.returncode)
            if stderr:
                self.ui.warn(stderr + '\n')
        return stdout, stderr

    @propertycache
    def _svnversion(self):
        # (major, minor) of the svn client, parsed from `svn --version`
        output, err = self._svncommand(['--version', '--quiet'], filename=None)
        m = re.search(r'^(\d+)\.(\d+)', output)
        if not m:
            raise error.Abort(_('cannot retrieve svn tool version'))
        return (int(m.group(1)), int(m.group(2)))

    def _wcrevs(self):
        # Get the working directory revision as well as the last
        # commit revision so we can compare the subrepo state with
        # both. We used to store the working directory one.
        output, err = self._svncommand(['info', '--xml'])
        doc = xml.dom.minidom.parseString(output)
        entries = doc.getElementsByTagName('entry')
        lastrev, rev = '0', '0'
        if entries:
            rev = str(entries[0].getAttribute('revision')) or '0'
            commits = entries[0].getElementsByTagName('commit')
            if commits:
                lastrev = str(commits[0].getAttribute('revision')) or '0'
        return (lastrev, rev)

    def _wcrev(self):
        # last-committed revision of the working copy
        return self._wcrevs()[0]

    def _wcchanged(self):
        """Return (changes, extchanges, missing) where changes is True
        if the working directory was changed, extchanges is
        True if any of these changes concern an external entry and missing
        is True if any change is a missing entry.
        """
        output, err = self._svncommand(['status', '--xml'])
        externals, changes, missing = [], [], []
        doc = xml.dom.minidom.parseString(output)
        for e in doc.getElementsByTagName('entry'):
            s = e.getElementsByTagName('wc-status')
            if not s:
                continue
            item = s[0].getAttribute('item')
            props = s[0].getAttribute('props')
            path = e.getAttribute('path')
            if item == 'external':
                externals.append(path)
            elif item == 'missing':
                missing.append(path)
            if (item not in ('', 'normal', 'unversioned', 'external')
                or props not in ('', 'none', 'normal')):
                changes.append(path)
        # a change under an external entry counts as an external change
        for path in changes:
            for ext in externals:
                if path == ext or path.startswith(ext + os.sep):
                    return True, True, bool(missing)
        return bool(changes), False, bool(missing)

    def dirty(self, ignoreupdate=False):
        if not self._wcchanged()[0]:
            if self._state[1] in self._wcrevs() or ignoreupdate:
                return False
        return True

    def basestate(self):
        lastrev, rev = self._wcrevs()
        if lastrev != rev:
            # Last committed rev is not the same than rev. We would
            # like to take lastrev but we do not know if the subrepo
            # URL exists at lastrev. Test it and fallback to rev it
            # is not there.
            try:
                self._svncommand(['list', '%s@%s' % (self._state[0], lastrev)])
                return lastrev
            except error.Abort:
                pass
        return rev

    @annotatesubrepoerror
    def commit(self, text, user, date):
        # user and date are out of our hands since svn is centralized
        changed, extchanged, missing = self._wcchanged()
        if not changed:
            return self.basestate()
        if extchanged:
            # Do not try to commit externals
            raise error.Abort(_('cannot commit svn externals'))
        if missing:
            # svn can commit with missing entries but aborting like hg
            # seems a better approach.
            raise error.Abort(_('cannot commit missing svn entries'))
        commitinfo, err = self._svncommand(['commit', '-m', text])
        self.ui.status(commitinfo)
        newrev = re.search('Committed revision ([0-9]+).', commitinfo)
        if not newrev:
            if not commitinfo.strip():
                # Sometimes, our definition of "changed" differs from
                # svn one. For instance, svn ignores missing files
                # when committing. If there are only missing files, no
                # commit is made, no output and no error code.
                raise error.Abort(_('failed to commit svn changes'))
            raise error.Abort(commitinfo.splitlines()[-1])
        newrev = newrev.groups()[0]
        self.ui.status(self._svncommand(['update', '-r', newrev])[0])
        return newrev

    @annotatesubrepoerror
    def remove(self):
        if self.dirty():
            self.ui.warn(_('not removing repo %s because '
                           'it has changes.\n') % self._path)
            return
        self.ui.note(_('removing subrepo %s\n') % self._path)

        self.wvfs.rmtree(forcibly=True)
        try:
            # clean up now-empty parent directories as well
            pwvfs = self._ctx.repo().wvfs
            pwvfs.removedirs(pwvfs.dirname(self._path))
        except OSError:
            pass

    @annotatesubrepoerror
    def get(self, state, overwrite=False):
        if overwrite:
            self._svncommand(['revert', '--recursive'])
        args = ['checkout']
        if self._svnversion >= (1, 5):
            args.append('--force')
        # The revision must be specified at the end of the URL to properly
        # update to a directory which has since been deleted and recreated.
        args.append('%s@%s' % (state[0], state[1]))
        status, err = self._svncommand(args, failok=True)
        _sanitize(self.ui, self.wvfs, '.svn')
        if not re.search('Checked out revision [0-9]+.', status):
            if ('is already a working copy for a different URL' in err
                and (self._wcchanged()[:2] == (False, False))):
                # obstructed but clean working copy, so just blow it away.
                self.remove()
                self.get(state, overwrite=False)
                return
            raise error.Abort((status or err).splitlines()[-1])
        self.ui.status(status)

    @annotatesubrepoerror
    def merge(self, state):
        old = self._state[1]
        new = state[1]
        wcrev = self._wcrev()
        if new != wcrev:
            dirty = old == wcrev or self._wcchanged()[0]
            if _updateprompt(self.ui, self, dirty, wcrev, new):
                self.get(state, False)

    def push(self, opts):
        # push is a no-op for SVN
        return True

    @annotatesubrepoerror
    def files(self):
        output = self._svncommand(['list', '--recursive', '--xml'])[0]
        doc = xml.dom.minidom.parseString(output)
        paths = []
        for e in doc.getElementsByTagName('entry'):
            kind = str(e.getAttribute('kind'))
            if kind != 'file':
                continue
            # collect the text content of the <name> element
            name = ''.join(c.data for c
                           in e.getElementsByTagName('name')[0].childNodes
                           if c.nodeType == c.TEXT_NODE)
            paths.append(name.encode('utf-8'))
        return paths

    def filedata(self, name):
        return self._svncommand(['cat'], name)[0]
class gitsubrepo(abstractsubrepo):
    """git-backed subrepository, driven via the ``git`` executable."""

    def __init__(self, ctx, path, state):
        super(gitsubrepo, self).__init__(ctx, path)
        self._state = state
        # absolute path of the subrepo inside the parent working directory
        self._abspath = ctx.repo().wjoin(path)
        self._subparent = ctx.repo()
        # locate and version-check the git executable (may abort)
        self._ensuregit()
def _ensuregit(self):
    """find a usable git executable and check its version

    Tries 'git', falling back to 'git.cmd' on Windows; aborts when
    neither is found or when the version is older than supported.
    """
    try:
        self._gitexecutable = 'git'
        out, err = self._gitnodir(['--version'])
    except OSError as e:
        genericerror = _("error executing git for subrepo '%s': %s")
        notfoundhint = _("check git is installed and in your PATH")
        if e.errno != errno.ENOENT:
            raise error.Abort(genericerror % (self._path, e.strerror))
        elif os.name == 'nt':
            try:
                # Windows installs may only expose git via git.cmd
                self._gitexecutable = 'git.cmd'
                out, err = self._gitnodir(['--version'])
            except OSError as e2:
                if e2.errno == errno.ENOENT:
                    raise error.Abort(_("couldn't find 'git' or 'git.cmd'"
                        " for subrepo '%s'") % self._path,
                        hint=notfoundhint)
                else:
                    raise error.Abort(genericerror % (self._path,
                                                      e2.strerror))
        else:
            raise error.Abort(_("couldn't find git for subrepo '%s'")
                              % self._path, hint=notfoundhint)
    versionstatus = self._checkversion(out)
    if versionstatus == 'unknown':
        self.ui.warn(_('cannot retrieve git version\n'))
    elif versionstatus == 'abort':
        raise error.Abort(_('git subrepo requires at least 1.6.0 or later'))
    elif versionstatus == 'warning':
        self.ui.warn(_('git subrepo requires at least 1.6.0 or later\n'))
@staticmethod
def _gitversion(out):
    """Parse `git --version` output into a (major, minor, patch) tuple.

    A two-component version is padded with a zero patch level; -1 is
    returned when the output is not recognized at all.
    """
    for pattern, pad in ((r'^git version (\d+)\.(\d+)\.(\d+)', False),
                         (r'^git version (\d+)\.(\d+)', True)):
        match = re.search(pattern, out)
        if match:
            parts = [int(g) for g in match.groups()]
            if pad:
                parts.append(0)
            return tuple(parts)
    return -1
@staticmethod
def _checkversion(out):
    '''ensure git version is new enough

    >>> _checkversion = gitsubrepo._checkversion
    >>> _checkversion('git version 1.6.0')
    'ok'
    >>> _checkversion('git version 1.8.5')
    'ok'
    >>> _checkversion('git version 1.4.0')
    'abort'
    >>> _checkversion('git version 1.5.0')
    'warning'
    >>> _checkversion('git version 1.9-rc0')
    'ok'
    >>> _checkversion('git version 1.9.0.265.g81cdec2')
    'ok'
    >>> _checkversion('git version 1.9.0.GIT')
    'ok'
    >>> _checkversion('git version 12345')
    'unknown'
    >>> _checkversion('no')
    'unknown'
    '''
    version = gitsubrepo._gitversion(out)
    # git 1.4.0 can't work at all, but 1.5.X can in at least some cases,
    # despite the docstring comment.  For now, error on 1.4.0, warn on
    # 1.5.0 but attempt to continue.
    if version == -1:
        return 'unknown'
    if version < (1, 5, 0):
        return 'abort'
    elif version < (1, 6, 0):
        return 'warning'
    return 'ok'
def _gitcommand(self, commands, env=None, stream=False):
    # run git inside the subrepo and return only its stdout
    return self._gitdir(commands, env=env, stream=stream)[0]
def _gitdir(self, commands, env=None, stream=False):
    # run git with the subrepo working directory as cwd
    return self._gitnodir(commands, env=env, stream=stream,
                          cwd=self._abspath)
def _gitnodir(self, commands, env=None, stream=False, cwd=None):
    """Calls the git command

    The methods tries to call the git command. versions prior to 1.6.0
    are not supported and very probably fail.

    Returns (stdout, returncode), or (stdout-pipe, None) when
    ``stream`` is set.
    """
    self.ui.debug('%s: git %s\n' % (self._relpath, ' '.join(commands)))
    if env is None:
        env = os.environ.copy()
    # fix for Git CVE-2015-7545
    if 'GIT_ALLOW_PROTOCOL' not in env:
        env['GIT_ALLOW_PROTOCOL'] = 'file:git:http:https:ssh'
    # unless ui.quiet is set, print git's stderr,
    # which is mostly progress and useful info
    errpipe = None
    if self.ui.quiet:
        errpipe = open(os.devnull, 'w')
    p = subprocess.Popen([self._gitexecutable] + commands, bufsize=-1,
                         cwd=cwd, env=env, close_fds=util.closefds,
                         stdout=subprocess.PIPE, stderr=errpipe)
    if stream:
        return p.stdout, None

    retdata = p.stdout.read().strip()
    # wait for the child to exit to avoid race condition.
    p.wait()

    if p.returncode != 0 and p.returncode != 1:
        # there are certain error codes that are ok
        command = commands[0]
        if command in ('cat-file', 'symbolic-ref'):
            return retdata, p.returncode
        # for all others, abort
        raise error.Abort('git %s error %d in %s' %
                          (command, p.returncode, self._relpath))

    return retdata, p.returncode
def _gitmissing(self):
    # True when the subrepo has not been cloned yet (no .git directory)
    return not self.wvfs.exists('.git')
def _gitstate(self):
    # current HEAD commit hash of the subrepo
    return self._gitcommand(['rev-parse', 'HEAD'])
def _gitcurrentbranch(self):
    # symbolic ref of HEAD, or None when on a detached HEAD
    current, err = self._gitdir(['symbolic-ref', 'HEAD', '--quiet'])
    if err:
        current = None
    return current
def _gitremote(self, remote):
    # extract the URL of a named remote from `git remote show -n`
    out = self._gitcommand(['remote', 'show', '-n', remote])
    line = out.split('\n')[1]
    i = line.index('URL: ') + len('URL: ')
    return line[i:]
def _githavelocally(self, revision):
    # True when the given revision object exists in the local repo
    out, code = self._gitdir(['cat-file', '-e', revision])
    return code == 0
def _gitisancestor(self, r1, r2):
    # True when r1 is an ancestor of r2
    base = self._gitcommand(['merge-base', r1, r2])
    return base == r1
def _gitisbare(self):
    # True when the repo is configured as bare (no working tree)
    return self._gitcommand(['config', '--bool', 'core.bare']) == 'true'
def _gitupdatestat(self):
    """This must be run before git diff-index.
    diff-index only looks at changes to file stat;
    this command looks at file contents and updates the stat."""
    self._gitcommand(['update-index', '-q', '--refresh'])
def _gitbranchmap(self):
    '''returns 2 things:
    a map from git branch to revision
    a map from revision to branches'''
    branch2rev = {}
    rev2branch = {}

    out = self._gitcommand(['for-each-ref', '--format',
                            '%(objectname) %(refname)'])
    for line in out.split('\n'):
        revision, ref = line.split(' ')
        # only local heads and remote-tracking branches are of interest
        if (not ref.startswith('refs/heads/') and
            not ref.startswith('refs/remotes/')):
            continue
        if ref.startswith('refs/remotes/') and ref.endswith('/HEAD'):
            continue # ignore remote/HEAD redirects
        branch2rev[ref] = revision
        rev2branch.setdefault(revision, []).append(ref)
    return branch2rev, rev2branch
def _gittracking(self, branches):
    'return map of remote branch to local tracking branch'
    # assumes no more than one local tracking branch for each remote
    tracking = {}
    for b in branches:
        if b.startswith('refs/remotes/'):
            continue
        bname = b.split('/', 2)[2]
        remote = self._gitcommand(['config', 'branch.%s.remote' % bname])
        if remote:
            ref = self._gitcommand(['config', 'branch.%s.merge' % bname])
            tracking['refs/remotes/%s/%s' %
                     (remote, ref.split('/', 2)[2])] = b
    return tracking
def _abssource(self, source):
    """resolve a possibly-relative subrepo source to an absolute one"""
    if '://' not in source:
        # recognize the scp syntax as an absolute source
        colon = source.find(':')
        if colon != -1 and '/' not in source[:colon]:
            return source
    self._subsource = source
    return _abssource(self)
def _fetch(self, source, revision):
    """make ``revision`` available locally, cloning from ``source`` or
    fetching from origin as needed; abort if it still cannot be found"""
    if self._gitmissing():
        source = self._abssource(source)
        self.ui.status(_('cloning subrepo %s from %s\n') %
                       (self._relpath, source))
        self._gitnodir(['clone', source, self._abspath])
    if self._githavelocally(revision):
        return
    self.ui.status(_('pulling subrepo %s from %s\n') %
                   (self._relpath, self._gitremote('origin')))
    # try only origin: the originally cloned repo
    self._gitcommand(['fetch'])
    if not self._githavelocally(revision):
        # no trailing newline in the message: error.Abort text is
        # formatted by the dispatcher, which supplies its own newline
        raise error.Abort(_("revision %s does not exist in subrepo %s") %
                          (revision, self._relpath))
@annotatesubrepoerror
def dirty(self, ignoreupdate=False):
    if self._gitmissing():
        # not cloned yet: dirty only if a state is recorded
        return self._state[1] != ''
    if self._gitisbare():
        return True
    if not ignoreupdate and self._state[1] != self._gitstate():
        # different version checked out
        return True
    # check for staged changes or modified files; ignore untracked files
    self._gitupdatestat()
    out, code = self._gitdir(['diff-index', '--quiet', 'HEAD'])
    return code == 1
def basestate(self):
    # base state is simply the current HEAD hash
    return self._gitstate()
@annotatesubrepoerror
def get(self, state, overwrite=False):
    """Update the working copy to the (source, revision) in *state*.

    Tries hard to end up on a sensible branch: prefers master, then any
    local branch at the revision, then a tracked remote branch
    (fast-forwarding the local tracking branch when safe), and only
    falls back to a detached HEAD checkout as a last resort.
    """
    source, revision, kind = state
    if not revision:
        # empty revision means the subrepo was removed from .hgsubstate
        self.remove()
        return
    self._fetch(source, revision)
    # if the repo was set to be bare, unbare it
    if self._gitisbare():
        self._gitcommand(['config', 'core.bare', 'false'])
        if self._gitstate() == revision:
            self._gitcommand(['reset', '--hard', 'HEAD'])
            return
    elif self._gitstate() == revision:
        if overwrite:
            # first reset the index to unmark new files for commit, because
            # reset --hard will otherwise throw away files added for commit,
            # not just unmark them.
            self._gitcommand(['reset', 'HEAD'])
        self._gitcommand(['reset', '--hard', 'HEAD'])
        return
    branch2rev, rev2branch = self._gitbranchmap()

    def checkout(args):
        # run 'git checkout', forcing (-f) when overwrite was requested
        cmd = ['checkout']
        if overwrite:
            # first reset the index to unmark new files for commit, because
            # the -f option will otherwise throw away files added for
            # commit, not just unmark them.
            self._gitcommand(['reset', 'HEAD'])
            cmd.append('-f')
        self._gitcommand(cmd + args)
        _sanitize(self.ui, self.wvfs, '.git')

    def rawcheckout():
        # no branch to checkout, check it out with no branch
        self.ui.warn(_('checking out detached HEAD in subrepo %s\n') %
                     self._relpath)
        self.ui.warn(_('check out a git branch if you intend '
                       'to make changes\n'))
        checkout(['-q', revision])

    if revision not in rev2branch:
        rawcheckout()
        return
    branches = rev2branch[revision]
    firstlocalbranch = None
    for b in branches:
        if b == 'refs/heads/master':
            # master trumps all other branches
            checkout(['refs/heads/master'])
            return
        if not firstlocalbranch and not b.startswith('refs/remotes/'):
            firstlocalbranch = b
    if firstlocalbranch:
        checkout([firstlocalbranch])
        return

    tracking = self._gittracking(branch2rev.keys())
    # choose a remote branch already tracked if possible
    remote = branches[0]
    if remote not in tracking:
        for b in branches:
            if b in tracking:
                remote = b
                break

    if remote not in tracking:
        # create a new local tracking branch
        local = remote.split('/', 3)[3]
        checkout(['-b', local, remote])
    elif self._gitisancestor(branch2rev[tracking[remote]], remote):
        # When updating to a tracked remote branch,
        # if the local tracking branch is downstream of it,
        # a normal `git pull` would have performed a "fast-forward merge"
        # which is equivalent to updating the local branch to the remote.
        # Since we are only looking at branching at update, we need to
        # detect this situation and perform this action lazily.
        if tracking[remote] != self._gitcurrentbranch():
            checkout([tracking[remote]])
        self._gitcommand(['merge', '--ff', remote])
        _sanitize(self.ui, self.wvfs, '.git')
    else:
        # a real merge would be required, just checkout the revision
        rawcheckout()
@annotatesubrepoerror
def commit(self, text, user, date):
    """Commit all working-copy changes with message *text*.

    Returns the resulting git commit hash.  *user* and *date* are
    forwarded as git author name and author date when given.
    """
    if self._gitmissing():
        raise error.Abort(_("subrepo %s is missing") % self._relpath)
    cmd = ['commit', '-a', '-m', text]
    env = os.environ.copy()
    if user:
        cmd += ['--author', user]
    if date:
        # git's date parser silently ignores when seconds < 1e9
        # convert to ISO8601
        env['GIT_AUTHOR_DATE'] = util.datestr(date,
                                              '%Y-%m-%dT%H:%M:%S %1%2')
    self._gitcommand(cmd, env=env)
    # make sure commit works otherwise HEAD might not exist under certain
    # circumstances
    return self._gitstate()
@annotatesubrepoerror
def merge(self, state):
    """Merge the revision in *state* into the working copy.

    Fast-forwards when possible, otherwise runs 'git merge --no-commit'.
    If the working copy is dirty, the user is prompted first.
    """
    source, revision, kind = state
    self._fetch(source, revision)
    base = self._gitcommand(['merge-base', revision, self._state[1]])
    self._gitupdatestat()
    out, code = self._gitdir(['diff-index', '--quiet', 'HEAD'])

    def mergefunc():
        if base == revision:
            self.get(state) # fast forward merge
        elif base != self._state[1]:
            self._gitcommand(['merge', '--no-commit', revision])
        _sanitize(self.ui, self.wvfs, '.git')

    if self.dirty():
        if self._gitstate() != revision:
            # code != 0 means diff-index found local modifications
            dirty = self._gitstate() == self._state[1] or code != 0
            if _updateprompt(self.ui, self, dirty,
                             self._state[1][:7], revision[:7]):
                mergefunc()
    else:
        mergefunc()
@annotatesubrepoerror
def push(self, opts):
    """Push the recorded revision to 'origin'.

    Returns True when nothing needed pushing or the push succeeded,
    False when there is no suitable branch or the push failed.
    """
    force = opts.get('force')

    if not self._state[1]:
        return True
    if self._gitmissing():
        raise error.Abort(_("subrepo %s is missing") % self._relpath)
    # if a branch in origin contains the revision, nothing to do
    branch2rev, rev2branch = self._gitbranchmap()
    if self._state[1] in rev2branch:
        for b in rev2branch[self._state[1]]:
            if b.startswith('refs/remotes/origin/'):
                return True
    for b, revision in branch2rev.iteritems():
        if b.startswith('refs/remotes/origin/'):
            if self._gitisancestor(self._state[1], revision):
                return True
    # otherwise, try to push the currently checked out branch
    cmd = ['push']
    if force:
        cmd.append('--force')

    current = self._gitcurrentbranch()
    if current:
        # determine if the current branch is even useful
        if not self._gitisancestor(self._state[1], current):
            self.ui.warn(_('unrelated git branch checked out '
                           'in subrepo %s\n') % self._relpath)
            return False
        self.ui.status(_('pushing branch %s of subrepo %s\n') %
                       (current.split('/', 2)[2], self._relpath))
        ret = self._gitdir(cmd + ['origin', current])
        return ret[1] == 0
    else:
        self.ui.warn(_('no branch checked out in subrepo %s\n'
                       'cannot push revision %s\n') %
                     (self._relpath, self._state[1]))
        return False
@annotatesubrepoerror
def add(self, ui, match, prefix, explicitonly, **opts):
    """'hg add' for the git subrepo: stage matching untracked files.

    Returns the list of rejected files (explicitly named files that were
    already tracked).  NOTE(review): *explicitonly* is accepted for
    interface parity with other subrepo types but is unused here.
    """
    if self._gitmissing():
        return []

    (modified, added, removed,
     deleted, unknown, ignored, clean) = self.status(None, unknown=True,
                                                     clean=True)

    tracked = set()
    # dirstates 'amn' warn, 'r' is added again
    for l in (modified, added, deleted, clean):
        tracked.update(l)

    # Unknown files not of interest will be rejected by the matcher
    files = unknown
    files.extend(match.files())

    rejected = []

    files = [f for f in sorted(set(files)) if match(f)]
    for f in files:
        exact = match.exact(f)
        command = ["add"]
        if exact:
            command.append("-f") #should be added, even if ignored
        if ui.verbose or not exact:
            ui.status(_('adding %s\n') % match.rel(f))

        if f in tracked:  # hg prints 'adding' even if already tracked
            if exact:
                rejected.append(f)
            continue
        if not opts.get('dry_run'):
            self._gitcommand(command + [f])

    for f in rejected:
        ui.warn(_("%s already tracked!\n") % match.abs(f))

    return rejected
@annotatesubrepoerror
def remove(self):
    """Remove the subrepo working copy, keeping local-only history.

    Refuses to act on a dirty repo.  Instead of deleting .git, the repo
    is turned bare and only the checked-out files are removed.
    """
    if self._gitmissing():
        return
    if self.dirty():
        self.ui.warn(_('not removing repo %s because '
                       'it has changes.\n') % self._relpath)
        return
    # we can't fully delete the repository as it may contain
    # local-only history
    self.ui.note(_('removing subrepo %s\n') % self._relpath)
    self._gitcommand(['config', 'core.bare', 'true'])
    for f, kind in self.wvfs.readdir():
        if f == '.git':
            continue
        if kind == stat.S_IFDIR:
            self.wvfs.rmtree(f)
        else:
            self.wvfs.unlink(f)
def archive(self, archiver, prefix, match=None):
    """Add the subrepo's files at the recorded revision to *archiver*.

    Streams 'git archive' output as a tar and replays each regular file
    or symlink into the archiver.  Returns the number of files written.
    """
    total = 0
    source, revision = self._state
    if not revision:
        return total
    self._fetch(source, revision)

    # Parse git's native archive command.
    # This should be much faster than manually traversing the trees
    # and objects with many subprocess calls.
    tarstream = self._gitcommand(['archive', revision], stream=True)
    tar = tarfile.open(fileobj=tarstream, mode='r|')
    relpath = subrelpath(self)
    self.ui.progress(_('archiving (%s)') % relpath, 0, unit=_('files'))
    for i, info in enumerate(tar):
        if info.isdir():
            continue
        if match and not match(info.name):
            continue
        if info.issym():
            # symlinks carry their target as the payload
            data = info.linkname
        else:
            data = tar.extractfile(info).read()
        archiver.addfile(prefix + self._path + '/' + info.name,
                         info.mode, info.issym(), data)
        total += 1
        self.ui.progress(_('archiving (%s)') % relpath, i + 1,
                         unit=_('files'))
    self.ui.progress(_('archiving (%s)') % relpath, None)
    return total
@annotatesubrepoerror
def cat(self, match, prefix, **opts):
    """Write out file contents at the recorded revision ('hg cat').

    Returns 0 on success, 1 when patterns are used (unsupported) or no
    files were named.
    """
    rev = self._state[1]
    if match.anypats():
        return 1 #No support for include/exclude yet

    if not match.files():
        return 1

    for f in match.files():
        output = self._gitcommand(["show", "%s:%s" % (rev, f)])
        fp = cmdutil.makefileobj(self._subparent, opts.get('output'),
                                 self._ctx.node(),
                                 pathname=self.wvfs.reljoin(prefix, f))
        fp.write(output)
        fp.close()
    return 0
@annotatesubrepoerror
def status(self, rev2, **opts):
    """Return an scmutil.status for the working copy vs *rev2* (or the
    recorded revision when rev2 is None).

    Modified/added/removed come from 'git diff-index'/'diff-tree';
    unknown/ignored from 'git status --porcelain -z'; clean files are
    computed from 'git ls-files' minus everything changed.
    """
    rev1 = self._state[1]
    if self._gitmissing() or not rev1:
        # if the repo is missing, return no results
        return scmutil.status([], [], [], [], [], [], [])
    modified, added, removed = [], [], []
    self._gitupdatestat()
    if rev2:
        command = ['diff-tree', '--no-renames', '-r', rev1, rev2]
    else:
        command = ['diff-index', '--no-renames', rev1]
    out = self._gitcommand(command)
    for line in out.split('\n'):
        tab = line.find('\t')
        if tab == -1:
            continue
        # status letter precedes the tab; path follows it
        status, f = line[tab - 1], line[tab + 1:]
        if status == 'M':
            modified.append(f)
        elif status == 'A':
            added.append(f)
        elif status == 'D':
            removed.append(f)

    deleted, unknown, ignored, clean = [], [], [], []

    command = ['status', '--porcelain', '-z']
    if opts.get('unknown'):
        command += ['--untracked-files=all']
    if opts.get('ignored'):
        command += ['--ignored']

    out = self._gitcommand(command)
    changedfiles = set()
    changedfiles.update(modified)
    changedfiles.update(added)
    changedfiles.update(removed)
    for line in out.split('\0'):
        if not line:
            continue
        st = line[0:2]
        #moves and copies show 2 files on one line
        # NOTE(review): after split('\0') a line can no longer contain
        # '\0', so this branch looks unreachable for -z rename entries;
        # verify against git status --porcelain -z output format.
        if line.find('\0') >= 0:
            filename1, filename2 = line[3:].split('\0')
        else:
            filename1 = line[3:]
            filename2 = None

        changedfiles.add(filename1)
        if filename2:
            changedfiles.add(filename2)

        if st == '??':
            unknown.append(filename1)
        elif st == '!!':
            ignored.append(filename1)

    if opts.get('clean'):
        out = self._gitcommand(['ls-files'])
        for f in out.split('\n'):
            if f not in changedfiles:
                clean.append(f)

    return scmutil.status(modified, added, removed, deleted,
                          unknown, ignored, clean)
@annotatesubrepoerror
def diff(self, ui, diffopts, node2, match, prefix, **opts):
    """Write a git diff between the recorded revision and *node2*.

    diffopts flags (context, whitespace handling, prefixes) are mapped
    onto the equivalent 'git diff' options.
    """
    node1 = self._state[1]
    cmd = ['diff', '--no-renames']
    if opts['stat']:
        cmd.append('--stat')
    else:
        # for Git, this also implies '-p'
        cmd.append('-U%d' % diffopts.context)

    gitprefix = self.wvfs.reljoin(prefix, self._path)

    if diffopts.noprefix:
        cmd.extend(['--src-prefix=%s/' % gitprefix,
                    '--dst-prefix=%s/' % gitprefix])
    else:
        cmd.extend(['--src-prefix=a/%s/' % gitprefix,
                    '--dst-prefix=b/%s/' % gitprefix])

    if diffopts.ignorews:
        cmd.append('--ignore-all-space')
    if diffopts.ignorewsamount:
        cmd.append('--ignore-space-change')
    # --ignore-blank-lines is only available from git 1.8.4 onwards
    if self._gitversion(self._gitcommand(['--version'])) >= (1, 8, 4) \
            and diffopts.ignoreblanklines:
        cmd.append('--ignore-blank-lines')

    cmd.append(node1)
    if node2:
        cmd.append(node2)

    output = ""
    if match.always():
        output += self._gitcommand(cmd) + '\n'
    else:
        # restrict the diff to files accepted by the matcher
        st = self.status(node2)[:3]
        files = [f for sublist in st for f in sublist]
        for f in files:
            if match(f):
                output += self._gitcommand(cmd + ['--', f]) + '\n'

    if output.strip():
        ui.write(output)
@annotatesubrepoerror
def revert(self, substate, *pats, **opts):
    """Revert the subrepo to *substate*, backing up modified files
    unless --no-backup was given.  Returns an empty list of failures.
    """
    self.ui.status(_('reverting subrepo %s\n') % substate[0])
    if not opts.get('no_backup'):
        # save each modified file as <name>.orig before discarding it
        status = self.status(None)
        names = status.modified
        for name in names:
            bakname = scmutil.origpath(self.ui, self._subparent, name)
            self.ui.note(_('saving current version of %s as %s\n') %
                         (name, bakname))
            self.wvfs.rename(name, bakname)

    if not opts.get('dry_run'):
        self.get(substate, overwrite=True)
    return []
def shortid(self, revid):
    """Return the abbreviated (seven-character) form of *revid*."""
    abbrev_len = 7
    return revid[:abbrev_len]
types = {
'hg': hgsubrepo,
'svn': svnsubrepo,
'git': gitsubrepo,
} | unknown | codeparrot/codeparrot-clean | ||
#pragma once
#include <cuda_runtime_api.h>
#include <c10/core/DeviceGuard.h>
#include <c10/core/Stream.h>
#include <c10/cuda/CUDAFunctions.h>
#include <c10/util/Exception.h>
/*
* Stream pool note.
*
* A CUDAStream is an abstraction of an actual cuStream on the GPU. CUDAStreams
* are backed by cuStreams, but they use several pools to minimize the costs
* associated with creating, retaining, and destroying cuStreams.
*
* There are three pools per device, and a device's pools are lazily created.
*
* The first pool contains only the default stream. When the default stream
* is requested it's returned.
*
* The second pool is the "low priority" or "default priority" streams. In
* HIP builds there is no distinction between streams in this pool and streams
* in the third pool (below). There are 32 of these streams per device, and
* when a stream is requested one of these streams is returned round-robin.
* That is, the first stream requested is at index 0, the second at index 1...
* to index 31, then index 0 again.
*
* This means that if 33 low priority streams are requested, the first and
* last streams requested are actually the same stream (under the covers)
* and kernels enqueued on them cannot run concurrently.
*
* The third pool is the "high priority" streams. The third pool acts like
* the second pool except the streams are created with a higher priority.
*
* These pools suggest that stream users should prefer many short-lived streams,
* as the cost of acquiring and releasing streams is effectively zero. If
* many longer-lived streams are required in performance critical scenarios
* then the functionality here may need to be extended to allow, for example,
* "reserving" a subset of the pool so that other streams do not accidentally
* overlap the performance critical streams.
*
* Note: although the notion of "current stream for device" is thread local
* (every OS thread has a separate current stream, as one might expect),
* the stream pool is global across all threads; stream 0 is always stream 0
* no matter which thread you use it on. Multiple threads can synchronize
* on the same stream. Although the CUDA documentation is not very clear
* on the matter, streams are thread safe; e.g., it is safe to enqueue
* a kernel on the same stream from two different threads.
*/
namespace c10::cuda {
static constexpr int max_compile_time_stream_priorities = 4;
// Value object representing a CUDA stream. This is just a wrapper
// around c10::Stream, but it comes with a little extra CUDA-specific
// functionality (conversion to cudaStream_t), and a guarantee that
// the wrapped c10::Stream really is a CUDA stream.
class C10_CUDA_API CUDAStream {
 public:
  enum Unchecked { UNCHECKED };

  /// Construct a CUDAStream from a Stream. This construction is checked,
  /// and will raise an error if the Stream is not, in fact, a CUDA stream.
  explicit CUDAStream(Stream stream) : stream_(stream) {
    TORCH_CHECK(stream_.device_type() == DeviceType::CUDA);
  }

  /// Construct a CUDAStream from a Stream with no error checking.
  /// This constructor uses the "named" constructor idiom, and can
  /// be invoked as: CUDAStream(CUDAStream::UNCHECKED, stream)
  explicit CUDAStream(Unchecked /*unused*/, Stream stream) : stream_(stream) {}

  /// Streams compare equal when their wrapped c10::Streams are equal.
  bool operator==(const CUDAStream& other) const noexcept {
    return unwrap() == other.unwrap();
  }

  bool operator!=(const CUDAStream& other) const noexcept {
    return unwrap() != other.unwrap();
  }

  /// Implicit conversion to cudaStream_t.
  operator cudaStream_t() const {
    return stream();
  }

  /// Implicit conversion to Stream (a.k.a., forget that the stream is a
  /// CUDA stream).
  operator Stream() const {
    return unwrap();
  }

  /// Used to avoid baking in device type explicitly to Python-side API.
  DeviceType device_type() const {
    return DeviceType::CUDA;
  }

  /// Get the CUDA device index that this stream is associated with.
  DeviceIndex device_index() const {
    return stream_.device_index();
  }

  /// Get the full Device that this stream is associated with. The Device
  /// is guaranteed to be a CUDA device.
  Device device() const {
    return Device(DeviceType::CUDA, device_index());
  }

  /// Return the stream ID corresponding to this particular stream.
  StreamId id() const {
    return stream_.id();
  }

  /// True if all work enqueued on this stream has completed (defined
  /// out of line).
  bool query() const;

  /// Block the calling host thread until this stream drains (defined
  /// out of line).
  void synchronize() const;

  /// Return the CUDA priority of the wrapped stream, as reported by
  /// cudaStreamGetPriority on the stream's device.
  int priority() const {
    DeviceGuard guard{stream_.device()};
    int priority = 0;
    C10_CUDA_CHECK(cudaStreamGetPriority(stream(), &priority));
    return priority;
  }

  /// Explicit conversion to cudaStream_t.
  cudaStream_t stream() const;

  /// Explicit conversion to Stream.
  Stream unwrap() const {
    return stream_;
  }

  /// Reversibly pack a CUDAStream into a struct representation.
  /// Previously the stream's data was packed into a single int64_t,
  /// as it was assumed the fields would not require more than
  /// 64 bits of storage in total.
  /// See https://github.com/pytorch/pytorch/issues/75854
  /// for more information regarding newer platforms that may violate
  /// this assumption.
  ///
  /// The CUDAStream can be unpacked using unpack().
  struct c10::StreamData3 pack3() const {
    return stream_.pack3();
  }

  // Unpack a CUDAStream from the 3 fields generated by pack().
  static CUDAStream unpack3(
      StreamId stream_id,
      DeviceIndex device_index,
      DeviceType device_type) {
    return CUDAStream(Stream::unpack3(stream_id, device_index, device_type));
  }

  static std::tuple<int, int> priority_range() {
    // Note: this returns the range of priority **supported by PyTorch**, not
    // the range of priority **supported by CUDA**. The former is a subset of
    // the latter.
    int least_priority = 0, greatest_priority = 0;
    C10_CUDA_CHECK(
        cudaDeviceGetStreamPriorityRange(&least_priority, &greatest_priority));
#ifdef USE_ROCM
    // See Note [HIP stream priorities]
    TORCH_INTERNAL_ASSERT(
        least_priority == 1, "Unexpected HIP stream priority range");
    least_priority = 0;
#else
    TORCH_INTERNAL_ASSERT(
        least_priority == 0, "Unexpected CUDA stream priority range");
#endif
    TORCH_INTERNAL_ASSERT(
        greatest_priority <= -1, "Unexpected CUDA stream priority range");
    // clamp to the number of priorities PyTorch was compiled to support
    greatest_priority = std::max(
        -c10::cuda::max_compile_time_stream_priorities + 1, greatest_priority);
    return std::make_tuple(least_priority, greatest_priority);
  }

  // Deleted for now; use CUDAEvent::block instead
  // void synchronize_with(const CUDAEvent& event) const;

 private:
  Stream stream_;
};
/**
* Get a new stream from the CUDA stream pool. You can think of this
* as "creating" a new stream, but no such creation actually happens;
* instead, streams are preallocated from the pool and returned in a
* round-robin fashion.
*
* You can request a stream from the high priority pool by setting
* isHighPriority to true, or a stream for a specific device by setting device
* (defaulting to the current CUDA stream.)
*/
C10_API CUDAStream
getStreamFromPool(const bool isHighPriority = false, DeviceIndex device = -1);
// no default priority to disambiguate overloads
C10_API CUDAStream
getStreamFromPool(const int priority, DeviceIndex device = -1);
/**
* Get a CUDAStream from a externally allocated one.
*
* This is mainly for interoperability with different libraries where we
* want to operate on a non-torch allocated stream for data exchange or similar
* purposes
*/
C10_API CUDAStream
getStreamFromExternal(cudaStream_t ext_stream, DeviceIndex device_index);
/**
* Get the default CUDA stream, for the passed CUDA device, or for the
* current device if no device index is passed. The default stream is
* where most computation occurs when you aren't explicitly using
* streams.
*/
C10_API CUDAStream getDefaultCUDAStream(DeviceIndex device_index = -1);
/**
* Get the current CUDA stream, for the passed CUDA device, or for the
* current device if no device index is passed. The current CUDA stream
* will usually be the default CUDA stream for the device, but it may
* be different if someone called 'setCurrentCUDAStream' or used 'StreamGuard'
* or 'CUDAStreamGuard'.
*/
C10_API CUDAStream getCurrentCUDAStream(DeviceIndex device_index = -1);
/**
* Set the current stream on the device of the passed in stream to be
* the passed in stream. Yes, you read that right: this function
* has *nothing* to do with the current device: it toggles the current
* stream of the device of the passed stream.
*
* Confused? Avoid using this function; prefer using 'CUDAStreamGuard' instead
* (which will switch both your current device and current stream in the way you
* expect, and reset it back to its original state afterwards).
*/
C10_API void setCurrentCUDAStream(CUDAStream stream);
C10_API std::ostream& operator<<(std::ostream& stream, const CUDAStream& s);
} // namespace c10::cuda
// hipify v2 backward compat in external projects
#ifdef USE_ROCM
namespace c10::hip {
using c10::cuda::getStreamFromExternal;
using c10::cuda::getStreamFromPool;
// must use inline wrappers instead of reference aliases due to default args
// HIP-named alias of getDefaultCUDAStream for hipified callers.
inline c10::cuda::CUDAStream getDefaultHIPStream(
    DeviceIndex device_index = -1) {
  return c10::cuda::getDefaultCUDAStream(device_index);
}
// HIP-named alias of getCurrentCUDAStream for hipified callers.
inline c10::cuda::CUDAStream getCurrentHIPStream(
    DeviceIndex device_index = -1) {
  return c10::cuda::getCurrentCUDAStream(device_index);
}
// no default args on this one, so a plain reference alias suffices
inline auto& setCurrentHIPStream = c10::cuda::setCurrentCUDAStream;
} // namespace c10::hip
#endif
namespace std {
template <>
struct hash<c10::cuda::CUDAStream> {
  // Delegates to std::hash<c10::Stream>, so streams that compare equal
  // (same wrapped Stream) hash equal.
  size_t operator()(c10::cuda::CUDAStream s) const noexcept {
    return std::hash<c10::Stream>{}(s.unwrap());
  }
};
} // namespace std | c | github | https://github.com/pytorch/pytorch | c10/cuda/CUDAStream.h |
/*
* Copyright (C) 2014 The Guava Authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.google.common.util.concurrent;
import static com.google.common.util.concurrent.MoreExecutors.directExecutor;
import static java.util.concurrent.Executors.newFixedThreadPool;
import com.google.common.collect.ConcurrentHashMultiset;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.ImmutableMultiset;
import com.google.common.collect.Multiset;
import com.google.common.testing.TestLogHandler;
import java.util.Map;
import java.util.Map.Entry;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.ExecutorService;
import java.util.logging.Level;
import java.util.logging.Logger;
import junit.framework.TestCase;
import org.jspecify.annotations.NullUnmarked;
/** Tests for {@link ListenerCallQueue}. */
@NullUnmarked
public class ListenerCallQueueTest extends TestCase {
private static final ListenerCallQueue.Event<Object> THROWING_EVENT =
new ListenerCallQueue.Event<Object>() {
@Override
public void call(Object object) {
throw new RuntimeException();
}
@Override
public String toString() {
return "throwing()";
}
};
/** Events enqueued before dispatch() are delivered, in order, once dispatch runs. */
public void testEnqueueAndDispatch() {
  Object listener = new Object();
  ListenerCallQueue<Object> queue = new ListenerCallQueue<>();
  queue.addListener(listener, directExecutor());

  Multiset<Object> counters = ConcurrentHashMultiset.create();
  queue.enqueue(incrementingEvent(counters, listener, 1));
  queue.enqueue(incrementingEvent(counters, listener, 2));
  queue.enqueue(incrementingEvent(counters, listener, 3));
  queue.enqueue(incrementingEvent(counters, listener, 4));
  // nothing runs until dispatch() is called
  assertEquals(0, counters.size());
  queue.dispatch();
  assertEquals(multiset(listener, 4), counters);
}
/** A listener added mid-stream only receives events enqueued after it was added. */
public void testEnqueueAndDispatch_multipleListeners() {
  Object listener1 = new Object();
  ListenerCallQueue<Object> queue = new ListenerCallQueue<>();
  queue.addListener(listener1, directExecutor());

  Multiset<Object> counters = ConcurrentHashMultiset.create();
  queue.enqueue(incrementingEvent(counters, listener1, 1));
  queue.enqueue(incrementingEvent(counters, listener1, 2));

  // listener2 joins after the first two events; it must not see them
  Object listener2 = new Object();
  queue.addListener(listener2, directExecutor());
  queue.enqueue(incrementingEvent(counters, multiset(listener1, 3, listener2, 1)));
  queue.enqueue(incrementingEvent(counters, multiset(listener1, 4, listener2, 2)));
  assertEquals(0, counters.size());
  queue.dispatch();
  assertEquals(multiset(listener1, 4, listener2, 2), counters);
}
/** An event that throws must not prevent later events from being delivered. */
public void testEnqueueAndDispatch_withExceptions() {
  Object listener = new Object();
  ListenerCallQueue<Object> queue = new ListenerCallQueue<>();
  queue.addListener(listener, directExecutor());

  Multiset<Object> counters = ConcurrentHashMultiset.create();
  queue.enqueue(incrementingEvent(counters, listener, 1));
  queue.enqueue(THROWING_EVENT);
  queue.enqueue(incrementingEvent(counters, listener, 2));
  queue.enqueue(THROWING_EVENT);
  queue.enqueue(incrementingEvent(counters, listener, 3));
  queue.enqueue(THROWING_EVENT);
  queue.enqueue(incrementingEvent(counters, listener, 4));
  queue.enqueue(THROWING_EVENT);
  assertEquals(0, counters.size());
  queue.dispatch();
  // all four non-throwing events ran despite interleaved failures
  assertEquals(multiset(listener, 4), counters);
}
/** Listener with a stable toString(), used to assert on logged failure messages. */
static final class MyListener {
  @Override
  public String toString() {
    return "MyListener";
  }
}
/** The label passed to enqueue() appears in the SEVERE log record for a failing event. */
public void testEnqueueAndDispatch_withLabeledExceptions() {
  Object listener = new MyListener();
  ListenerCallQueue<Object> queue = new ListenerCallQueue<>();
  queue.addListener(listener, directExecutor());
  queue.enqueue(THROWING_EVENT, "custom-label");

  Logger logger = Logger.getLogger(ListenerCallQueue.class.getName());
  logger.setLevel(Level.SEVERE);
  TestLogHandler logHandler = new TestLogHandler();
  logger.addHandler(logHandler);
  try {
    queue.dispatch();
  } finally {
    // always detach the handler so other tests see a clean logger
    logger.removeHandler(logHandler);
  }

  assertEquals(1, logHandler.getStoredLogRecords().size());
  assertEquals(
      "Exception while executing callback: MyListener custom-label",
      logHandler.getStoredLogRecords().get(0).getMessage());
}
/** Dispatching on a thread pool still delivers all events; a latch event marks completion. */
public void testEnqueueAndDispatch_multithreaded() throws InterruptedException {
  Object listener = new Object();
  ExecutorService service = newFixedThreadPool(4);
  ListenerCallQueue<Object> queue = new ListenerCallQueue<>();
  try {
    queue.addListener(listener, service);

    CountDownLatch latch = new CountDownLatch(1);
    Multiset<Object> counters = ConcurrentHashMultiset.create();
    queue.enqueue(incrementingEvent(counters, listener, 1));
    queue.enqueue(incrementingEvent(counters, listener, 2));
    queue.enqueue(incrementingEvent(counters, listener, 3));
    queue.enqueue(incrementingEvent(counters, listener, 4));
    // last event releases the latch, proving the earlier four ran first
    queue.enqueue(countDownEvent(latch));
    assertEquals(0, counters.size());
    queue.dispatch();
    latch.await();
    assertEquals(multiset(listener, 4), counters);
  } finally {
    service.shutdown();
  }
}
/** Multithreaded dispatch keeps delivering after events that throw. */
public void testEnqueueAndDispatch_multithreaded_withThrowingRunnable()
    throws InterruptedException {
  Object listener = new Object();
  ExecutorService service = newFixedThreadPool(4);
  ListenerCallQueue<Object> queue = new ListenerCallQueue<>();
  try {
    queue.addListener(listener, service);

    CountDownLatch latch = new CountDownLatch(1);
    Multiset<Object> counters = ConcurrentHashMultiset.create();
    queue.enqueue(incrementingEvent(counters, listener, 1));
    queue.enqueue(THROWING_EVENT);
    queue.enqueue(incrementingEvent(counters, listener, 2));
    queue.enqueue(THROWING_EVENT);
    queue.enqueue(incrementingEvent(counters, listener, 3));
    queue.enqueue(THROWING_EVENT);
    queue.enqueue(incrementingEvent(counters, listener, 4));
    queue.enqueue(THROWING_EVENT);
    // latch event proves the whole queue drained despite the failures
    queue.enqueue(countDownEvent(latch));
    assertEquals(0, counters.size());
    queue.dispatch();
    latch.await();
    assertEquals(multiset(listener, 4), counters);
  } finally {
    service.shutdown();
  }
}
/** Convenience overload: expect a single listener at a single count. */
private ListenerCallQueue.Event<Object> incrementingEvent(
    Multiset<Object> counters, Object expectedListener, int expectedCount) {
  return incrementingEvent(counters, multiset(expectedListener, expectedCount));
}
/**
 * Event that increments the listener's counter and asserts it matches the
 * expected per-listener count at the moment this event runs (checks ordering).
 */
private ListenerCallQueue.Event<Object> incrementingEvent(
    Multiset<Object> counters, Multiset<Object> expected) {
  return new ListenerCallQueue.Event<Object>() {
    @Override
    public void call(Object listener) {
      counters.add(listener);
      assertEquals(expected.count(listener), counters.count(listener));
    }

    @Override
    public String toString() {
      return "incrementing";
    }
  };
}
/** Multiset of one element with the given multiplicity. */
private static <T> ImmutableMultiset<T> multiset(T value, int count) {
  return multiset(ImmutableMap.of(value, count));
}
/** Multiset of two elements with their respective multiplicities. */
private static <T> ImmutableMultiset<T> multiset(T value1, int count1, T value2, int count2) {
  return multiset(ImmutableMap.of(value1, count1, value2, count2));
}
/** Builds an immutable multiset holding each map key with its mapped multiplicity. */
private static <T> ImmutableMultiset<T> multiset(Map<T, Integer> counts) {
  ImmutableMultiset.Builder<T> accumulator = ImmutableMultiset.builder();
  counts.forEach((element, occurrences) -> accumulator.addCopies(element, occurrences));
  return accumulator.build();
}
/** Event that releases the given latch, used to signal queue drain from a worker thread. */
private ListenerCallQueue.Event<Object> countDownEvent(CountDownLatch latch) {
  return new ListenerCallQueue.Event<Object>() {
    @Override
    public void call(Object listener) {
      latch.countDown();
    }

    @Override
    public String toString() {
      return "countDown";
    }
  };
}
} | java | github | https://github.com/google/guava | android/guava-tests/test/com/google/common/util/concurrent/ListenerCallQueueTest.java |
#include <stdio.h>
#include <lsxintrin.h>
/* Compile/run probe for LoongArch LSX support: load four floats with
 * __lsx_vld, truncate to int32 lanes with __lsx_vftint_w_s, and extract
 * lane 3 (value 3) with __lsx_vpickve2gr_w. */
int test()
{
    const float src[] = { 0.0f, 1.0f, 2.0f, 3.0f};
    v4f32 val = (v4f32)__lsx_vld((const float*)(src), 0);
    return __lsx_vpickve2gr_w(__lsx_vftint_w_s(val), 3);
}
int main()
{
printf("%d\n", test());
return 0;
} | cpp | github | https://github.com/opencv/opencv | cmake/checks/cpu_lsx.cpp |
import pip.download
from pip.commands.search import (compare_versions,
highest_version,
transform_hits,
SearchCommand)
from pip.status_codes import NO_MATCHES_FOUND, SUCCESS
from pip.backwardcompat import xmlrpclib, b
from mock import Mock
from tests.test_pip import run_pip, reset_env, pyversion
from tests.pypi_server import assert_equal
if pyversion >= '3':
VERBOSE_FALSE = False
else:
VERBOSE_FALSE = 0
def test_version_compare():
    """
    Test version comparison.

    compare_versions returns a cmp()-style -1/0/1; pre-releases such as
    '1.1a1' sort before the corresponding final release.
    """
    assert compare_versions('1.0', '1.1') == -1
    assert compare_versions('1.1', '1.0') == 1
    assert compare_versions('1.1a1', '1.1') == -1
    assert compare_versions('1.1.1', '1.1a') == -1
    assert highest_version(['1.0', '2.0', '0.1']) == '2.0'
    assert highest_version(['1.0a1', '1.0']) == '1.0'
def test_pypi_xml_transformation():
    """
    Test transformation of data structures (pypi xmlrpc to custom list).

    transform_hits merges entries with the same name into one record
    carrying all versions, the latest summary and the highest score.
    """
    pypi_hits = [{'_pypi_ordering': 100, 'name': 'foo', 'summary': 'foo summary', 'version': '1.0'},
                 {'_pypi_ordering': 200, 'name': 'foo', 'summary': 'foo summary v2', 'version': '2.0'},
                 {'_pypi_ordering': 50, 'name': 'bar', 'summary': 'bar summary', 'version': '1.0'}]
    expected = [{'score': 200, 'versions': ['1.0', '2.0'], 'name': 'foo', 'summary': 'foo summary v2'},
                {'score': 50, 'versions': ['1.0'], 'name': 'bar', 'summary': 'bar summary'}]
    assert_equal(expected, transform_hits(pypi_hits))
def test_search():
    """
    End to end test of search command.

    NOTE(review): hits the live PyPI index, so it requires network access.
    """
    reset_env()
    output = run_pip('search', 'pip')
    assert 'pip installs packages' in output.stdout
def test_multiple_search():
    """
    Test searching for multiple packages at once.

    Both query terms should be reflected in the combined output.
    """
    reset_env()
    output = run_pip('search', 'pip', 'INITools')
    assert 'pip installs packages' in output.stdout
    assert 'Tools for parsing and using INI-style files' in output.stdout
def test_searching_through_Search_class():
    """
    Verify if ``pip.vcs.Search`` uses tests xmlrpclib.Transport class

    Patches pip.download.xmlrpclib_transport with a mock and checks the
    exact xmlrpc request SearchCommand.search() issues.
    """
    original_xmlrpclib_transport = pip.download.xmlrpclib_transport
    pip.download.xmlrpclib_transport = fake_transport = Mock()

    query = 'mylittlequerythatdoesnotexists'
    dumped_xmlrpc_request = b(xmlrpclib.dumps(({'name': query, 'summary': query}, 'or'), 'search'))
    expected = [{'_pypi_ordering': 100, 'name': 'foo', 'summary': 'foo summary', 'version': '1.0'}]

    fake_transport.request.return_value = (expected,)
    pypi_searcher = SearchCommand()
    result = pypi_searcher.search(query, 'http://pypi.python.org/pypi')
    try:
        assert expected == result, result
        fake_transport.request.assert_called_with('pypi.python.org', '/pypi', dumped_xmlrpc_request, verbose=VERBOSE_FALSE)
    finally:
        # always restore the real transport so other tests are unaffected
        pip.download.xmlrpclib_transport = original_xmlrpclib_transport
def test_search_missing_argument():
    """
    Test missing required argument for search

    Running 'pip search' with no query should error out with a clear
    message rather than crash.
    """
    env = reset_env(use_distribute=True)
    result = run_pip('search', expect_error=True)
    assert 'ERROR: Missing required argument (search query).' in result.stdout
def test_run_method_should_return_sucess_when_find_packages():
    """
    Test SearchCommand.run for found package

    NOTE(review): 'sucess' in the function name is a typo; kept because
    renaming would change which test IDs appear in reports.
    """
    options_mock = Mock()
    options_mock.index = 'http://pypi.python.org/pypi'
    search_cmd = SearchCommand()
    status = search_cmd.run(options_mock, ('pip',))
    assert status == SUCCESS
def test_run_method_should_return_no_matches_found_when_does_not_find_packages():
    """
    Test SearchCommand.run for no matches

    A nonexistent package name should yield the NO_MATCHES_FOUND status.
    """
    options_mock = Mock()
    options_mock.index = 'http://pypi.python.org/pypi'
    search_cmd = SearchCommand()
    status = search_cmd.run(options_mock, ('non-existant-package',))
    assert status == NO_MATCHES_FOUND, status
def test_search_should_exit_status_code_zero_when_find_packages():
    """A successful search exits with the SUCCESS status code."""
    reset_env(use_distribute=True)
    proc = run_pip('search', 'pip')
    assert proc.returncode == SUCCESS
def test_search_exit_status_code_when_finds_no_package():
    """A search with no hits exits with the NO_MATCHES_FOUND status code."""
    reset_env(use_distribute=True)
    proc = run_pip('search', 'non-existant-package', expect_error=True)
    assert proc.returncode == NO_MATCHES_FOUND
# -*- coding: utf-8 -*-
#
# bhmm documentation build configuration file, created by
# sphinx-quickstart on Tue Feb 17 11:49:20 2015.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
sys.path.insert(0, os.path.abspath('sphinxext'))  # for sphinx extensions
sys.path.insert(0, os.path.abspath('themes/sphinx_rtd_theme-0.1.5'))  # for themes
# NOTE: this import relies on the theme path inserted just above.
import sphinx_rtd_theme
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
    'sphinx.ext.autodoc',
    'sphinx.ext.doctest',
    'sphinx.ext.todo',
    'sphinx.ext.mathjax',
    'sphinx.ext.viewcode',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'bhmm'
copyright = u'2015, John D. Chodera and Frank Noe'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.1'
# The full version, including alpha/beta/rc tags.
release = '0.1.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# The theme path is provided by the sphinx_rtd_theme package imported above.
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
html_logo = '_static/bhmm-logo.png'
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'bhmmdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
#  author, documentclass [howto, manual, or own class]).
latex_documents = [
  ('index', 'bhmm.tex', u'bhmm Documentation',
   u'John D. Chodera and Frank Noe', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    ('index', 'bhmm', u'bhmm Documentation',
     [u'John D. Chodera and Frank Noe'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
texinfo_documents = [
  ('index', 'bhmm', u'bhmm Documentation',
   u'John D. Chodera and Frank Noe', 'bhmm',
   # Fixed: replaced the sphinx-quickstart placeholder description
   # ('One line description of project.') with a real one.
   'Bayesian hidden Markov models (BHMM).',
   'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False | unknown | codeparrot/codeparrot-clean | ||
/* Copyright 2022 - 2025 R. Thomas
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef LIEF_PDB_TYPE_ATTRIBUTE_H
#define LIEF_PDB_TYPE_ATTRIBUTE_H
#include "LIEF/visibility.h"
#include <cstdint>
#include <string>
#include <memory>
namespace LIEF {
namespace pdb {
class Type;
namespace types {
namespace details {
class Attribute;
class AttributeIt;
}
/// This class represents an attribute (`LF_MEMBER`) in an aggregate (class,
/// struct, union, ...)
class LIEF_API Attribute {
  public:
  /// Forward iterator over attributes. Dereferencing yields an owning
  /// std::unique_ptr<Attribute>; the actual traversal is delegated to the
  /// pimpl type details::AttributeIt.
  class LIEF_API Iterator {
    public:
    using iterator_category = std::forward_iterator_tag;
    using value_type = std::unique_ptr<Attribute>;
    using difference_type = std::ptrdiff_t;
    using pointer = Attribute*;
    using reference = Attribute&;
    using implementation = details::AttributeIt;

    /// Helper returned by operator-> so that `it->member()` works even
    /// though operator* returns a temporary unique_ptr.
    class LIEF_API PointerProxy {
      // Inspired from LLVM's iterator_facade_base
      friend class Iterator;
      public:
      pointer operator->() const { return R.get(); }

      private:
      value_type R;

      template <typename RefT>
      PointerProxy(RefT &&R) : R(std::forward<RefT>(R)) {} // NOLINT(bugprone-forwarding-reference-overload)
    };

    Iterator(const Iterator&);
    Iterator(Iterator&&) noexcept;
    Iterator(std::unique_ptr<details::AttributeIt> impl);
    ~Iterator();

    friend LIEF_API bool operator==(const Iterator& LHS, const Iterator& RHS);
    friend LIEF_API bool operator!=(const Iterator& LHS, const Iterator& RHS) {
      return !(LHS == RHS);
    }

    Iterator& operator++();
    // Post-increment is implemented in terms of pre-increment.
    Iterator operator++(int) {
      Iterator tmp = *static_cast<Iterator*>(this);
      ++*static_cast<Iterator *>(this);
      return tmp;
    }

    std::unique_ptr<Attribute> operator*() const;

    PointerProxy operator->() const {
      return static_cast<const Iterator*>(this)->operator*();
    }

    private:
    std::unique_ptr<details::AttributeIt> impl_;
  };

  public:
  Attribute(std::unique_ptr<details::Attribute> impl);

  /// Name of the attribute
  std::string name() const;

  /// Type of this attribute
  std::unique_ptr<Type> type() const;

  /// Offset of this attribute in the aggregate
  uint64_t field_offset() const;

  ~Attribute();

  private:
  // Pimpl: hides the underlying PDB reader implementation.
  std::unique_ptr<details::Attribute> impl_;
};
}
}
}
#endif | unknown | github | https://github.com/nodejs/node | deps/LIEF/include/LIEF/PDB/types/Attribute.hpp |
# Benchmarks
Benchmarks are tests to measure the performance of pandas. There are two different
kinds of benchmarks relevant to pandas:
* Internal pandas benchmarks to measure speed and memory usage over time
* Community benchmarks comparing the speed or memory usage of different tools at
doing the same job
## pandas benchmarks
pandas benchmarks are implemented in the [asv_bench](https://github.com/pandas-dev/pandas/tree/main/asv_bench)
directory of our repository. The benchmarks are implemented for the
[airspeed velocity](https://asv.readthedocs.io/en/latest/) (asv for short) framework.
The benchmarks can be run locally by any pandas developer. This can be done
with the `asv run` command, and it can be useful for detecting whether local changes
have an impact on performance, by running the benchmarks before and after the changes.
More information on running the performance test suite is found
[here](https://pandas.pydata.org/docs/dev/development/contributing_codebase.html#running-the-performance-test-suite).
Note that benchmarks are not deterministic: running on different hardware, or on
the same hardware under different levels of load, has a big impact on the result.
Even running the benchmarks on identical hardware under almost identical
conditions can produce significant differences for the same exact code.
## Automated benchmark runner
The [asv-runner](https://github.com/pandas-dev/asv-runner/) repository automatically runs the pandas asv benchmark suite
for every (or almost every) commit to the `main` branch. It is run on GitHub actions.
See the linked repository for more details. The results are available at:
[https://pandas-dev.github.io/asv-runner/](https://pandas-dev.github.io/asv-runner/)
## Community benchmarks
The main benchmarks comparing dataframe tools that include pandas are:
- [DuckDB (former H2O.ai) benchmarks](https://duckdblabs.github.io/db-benchmark/)
- [TPCH benchmarks](https://pola.rs/posts/benchmarks/) | unknown | github | https://github.com/pandas-dev/pandas | web/pandas/community/benchmarks.md |
# Unix SMB/CIFS implementation.
# Copyright (C) Jelmer Vernooij <jelmer@samba.org> 2008
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
"""Tests for samba.dcerpc.registry."""
from samba.dcerpc import winreg
from samba.tests import RpcInterfaceTestCase
class WinregTests(RpcInterfaceTestCase):
    """Exercise the winreg RPC interface over a local RPC (ncalrpc) connection."""

    def setUp(self):
        super(WinregTests, self).setUp()
        # Connect to the local winreg pipe using the test credentials.
        self.conn = winreg.winreg("ncalrpc:", self.get_loadparm(),
                                  self.get_credentials())

    def get_hklm(self):
        """Open and return a read-only handle to HKEY_LOCAL_MACHINE."""
        return self.conn.OpenHKLM(None,
                winreg.KEY_QUERY_VALUE | winreg.KEY_ENUMERATE_SUB_KEYS)

    def test_hklm(self):
        # Opening and closing HKLM should succeed without raising.
        # Consistency fix: reuse the get_hklm() helper instead of
        # duplicating the OpenHKLM call inline.
        handle = self.get_hklm()
        self.conn.CloseKey(handle)

    def test_getversion(self):
        handle = self.get_hklm()
        version = self.conn.GetVersion(handle)
        # GetVersion must return a plain integer version number.
        # assertEquals is a deprecated alias; use assertEqual.
        self.assertEqual(int, version.__class__)
        self.conn.CloseKey(handle)

    def test_getkeyinfo(self):
        handle = self.get_hklm()
        x = self.conn.QueryInfoKey(handle, winreg.String())
        self.assertEqual(9, len(x))  # should return a 9-tuple
        self.conn.CloseKey(handle)
# Copyright (c) 2013 NTT DOCOMO, INC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from ironicclient import exc as ironic_exc
import mock
import six
from webob import exc
from nova.api.openstack.compute import baremetal_nodes \
as b_nodes_v21
from nova import context
from nova import exception
from nova import test
from nova.tests.unit.api.openstack import fakes
from nova.tests.unit.virt.ironic import utils as ironic_utils
def fake_node(**updates):
    """Return a fake bare-metal node dict; keyword args override defaults."""
    node = dict(
        id=1,
        service_host="host",
        cpus=8,
        memory_mb=8192,
        local_gb=128,
        pm_address="10.1.2.3",
        pm_user="pm_user",
        pm_password="pm_pass",
        terminal_port=8000,
        interfaces=[],
        instance_uuid='fake-instance-uuid',
    )
    # Updating with an empty mapping is a no-op, so no guard is needed.
    node.update(updates)
    return node
def fake_node_ext_status(**updates):
    """Return a fake node dict carrying the extended-status fields."""
    overrides = dict(uuid='fake-uuid',
                     task_state='fake-task-state',
                     updated_at='fake-updated-at',
                     pxe_config_path='fake-pxe-config-path')
    # Caller-supplied values win over the extended-status defaults,
    # matching the original update-after-construction behaviour.
    overrides.update(updates)
    return fake_node(**overrides)
# Shared fake Ironic client; patched into the controller module for all tests.
FAKE_IRONIC_CLIENT = ironic_utils.FakeClient()
@mock.patch.object(b_nodes_v21, '_get_ironic_client',
                   lambda *_: FAKE_IRONIC_CLIENT)
class BareMetalNodesTestV21(test.NoDBTestCase):
    """Tests for the v2.1 baremetal-nodes API proxy backed by Ironic.

    The class decorator replaces the controller's Ironic client factory with
    FAKE_IRONIC_CLIENT, so individual tests only need to mock the node
    methods (list/get/list_ports) they exercise.
    """
    mod = b_nodes_v21
    def setUp(self):
        super(BareMetalNodesTestV21, self).setUp()
        self._setup()
        self.context = context.get_admin_context()
        self.request = fakes.HTTPRequest.blank('', use_admin_context=True)
    def _setup(self):
        # Separate hook so subclasses can swap in a different controller.
        self.controller = b_nodes_v21.BareMetalNodeController()
    @mock.patch.object(FAKE_IRONIC_CLIENT.node, 'list')
    def test_index_ironic(self, mock_list):
        properties = {'cpus': 2, 'memory_mb': 1024, 'local_gb': 20}
        node = ironic_utils.get_test_node(properties=properties)
        mock_list.return_value = [node]
        res_dict = self.controller.index(self.request)
        expected_output = {'nodes':
                           [{'memory_mb': properties['memory_mb'],
                             'host': 'IRONIC MANAGED',
                             'disk_gb': properties['local_gb'],
                             'interfaces': [],
                             'task_state': None,
                             'id': node.uuid,
                             'cpus': properties['cpus']}]}
        self.assertEqual(expected_output, res_dict)
        mock_list.assert_called_once_with(detail=True)
    @mock.patch.object(FAKE_IRONIC_CLIENT.node, 'list')
    def test_index_ironic_missing_properties(self, mock_list):
        # Missing memory/disk properties must default to 0, not KeyError.
        properties = {'cpus': 2}
        node = ironic_utils.get_test_node(properties=properties)
        mock_list.return_value = [node]
        res_dict = self.controller.index(self.request)
        expected_output = {'nodes':
                           [{'memory_mb': 0,
                             'host': 'IRONIC MANAGED',
                             'disk_gb': 0,
                             'interfaces': [],
                             'task_state': None,
                             'id': node.uuid,
                             'cpus': properties['cpus']}]}
        self.assertEqual(expected_output, res_dict)
        mock_list.assert_called_once_with(detail=True)
    def test_index_ironic_not_implemented(self):
        # Without the ironicclient library the API must report 501.
        with mock.patch.object(self.mod, 'ironic_client', None):
            self.assertRaises(exc.HTTPNotImplemented,
                              self.controller.index,
                              self.request)
    @mock.patch.object(FAKE_IRONIC_CLIENT.node, 'list_ports')
    @mock.patch.object(FAKE_IRONIC_CLIENT.node, 'get')
    def test_show_ironic(self, mock_get, mock_list_ports):
        properties = {'cpus': 1, 'memory_mb': 512, 'local_gb': 10}
        node = ironic_utils.get_test_node(properties=properties)
        port = ironic_utils.get_test_port()
        mock_get.return_value = node
        mock_list_ports.return_value = [port]
        res_dict = self.controller.show(self.request, node.uuid)
        expected_output = {'node':
                           {'memory_mb': properties['memory_mb'],
                            'instance_uuid': None,
                            'host': 'IRONIC MANAGED',
                            'disk_gb': properties['local_gb'],
                            'interfaces': [{'address': port.address}],
                            'task_state': None,
                            'id': node.uuid,
                            'cpus': properties['cpus']}}
        self.assertEqual(expected_output, res_dict)
        mock_get.assert_called_once_with(node.uuid)
        mock_list_ports.assert_called_once_with(node.uuid)
    @mock.patch.object(FAKE_IRONIC_CLIENT.node, 'list_ports')
    @mock.patch.object(FAKE_IRONIC_CLIENT.node, 'get')
    def test_show_ironic_no_properties(self, mock_get, mock_list_ports):
        # An empty properties dict must also fall back to zeros.
        properties = {}
        node = ironic_utils.get_test_node(properties=properties)
        port = ironic_utils.get_test_port()
        mock_get.return_value = node
        mock_list_ports.return_value = [port]
        res_dict = self.controller.show(self.request, node.uuid)
        expected_output = {'node':
                           {'memory_mb': 0,
                            'instance_uuid': None,
                            'host': 'IRONIC MANAGED',
                            'disk_gb': 0,
                            'interfaces': [{'address': port.address}],
                            'task_state': None,
                            'id': node.uuid,
                            'cpus': 0}}
        self.assertEqual(expected_output, res_dict)
        mock_get.assert_called_once_with(node.uuid)
        mock_list_ports.assert_called_once_with(node.uuid)
    @mock.patch.object(FAKE_IRONIC_CLIENT.node, 'list_ports')
    @mock.patch.object(FAKE_IRONIC_CLIENT.node, 'get')
    def test_show_ironic_no_interfaces(self, mock_get, mock_list_ports):
        properties = {'cpus': 1, 'memory_mb': 512, 'local_gb': 10}
        node = ironic_utils.get_test_node(properties=properties)
        mock_get.return_value = node
        mock_list_ports.return_value = []
        res_dict = self.controller.show(self.request, node.uuid)
        self.assertEqual([], res_dict['node']['interfaces'])
        mock_get.assert_called_once_with(node.uuid)
        mock_list_ports.assert_called_once_with(node.uuid)
    @mock.patch.object(FAKE_IRONIC_CLIENT.node, 'get',
                       side_effect=ironic_exc.NotFound())
    def test_show_ironic_node_not_found(self, mock_get):
        # Ironic NotFound must be translated to HTTP 404 mentioning the id.
        error = self.assertRaises(exc.HTTPNotFound, self.controller.show,
                                  self.request, 'fake-uuid')
        self.assertIn('fake-uuid', six.text_type(error))
    def test_show_ironic_not_implemented(self):
        with mock.patch.object(self.mod, 'ironic_client', None):
            properties = {'cpus': 1, 'memory_mb': 512, 'local_gb': 10}
            node = ironic_utils.get_test_node(properties=properties)
            self.assertRaises(exc.HTTPNotImplemented, self.controller.show,
                              self.request, node.uuid)
    # Write operations are proxied to Ironic and rejected by this API.
    def test_create_ironic_not_supported(self):
        self.assertRaises(exc.HTTPBadRequest,
                          self.controller.create,
                          self.request, {'node': object()})
    def test_delete_ironic_not_supported(self):
        self.assertRaises(exc.HTTPBadRequest,
                          self.controller.delete,
                          self.request, 'fake-id')
    def test_add_interface_ironic_not_supported(self):
        self.assertRaises(exc.HTTPBadRequest,
                          self.controller._add_interface,
                          self.request, 'fake-id', 'fake-body')
    def test_remove_interface_ironic_not_supported(self):
        self.assertRaises(exc.HTTPBadRequest,
                          self.controller._remove_interface,
                          self.request, 'fake-id', 'fake-body')
class BareMetalNodesTestDeprecation(test.NoDBTestCase):
    """Verify the API is gone as of microversion 2.36.

    Every baremetal-nodes endpoint must raise VersionNotFoundForAPIMethod
    when called with a 2.36 request.
    """
    def setUp(self):
        super(BareMetalNodesTestDeprecation, self).setUp()
        self.controller = b_nodes_v21.BareMetalNodeController()
        self.req = fakes.HTTPRequest.blank('', version='2.36')
    def test_all_apis_return_not_found(self):
        self.assertRaises(exception.VersionNotFoundForAPIMethod,
            self.controller.show, self.req, fakes.FAKE_UUID)
        self.assertRaises(exception.VersionNotFoundForAPIMethod,
            self.controller.index, self.req)
        self.assertRaises(exception.VersionNotFoundForAPIMethod,
            self.controller.create, self.req, {})
        self.assertRaises(exception.VersionNotFoundForAPIMethod,
            self.controller.delete, self.req, fakes.FAKE_UUID)
        self.assertRaises(exception.VersionNotFoundForAPIMethod,
            self.controller._add_interface, self.req, fakes.FAKE_UUID, {})
        self.assertRaises(exception.VersionNotFoundForAPIMethod,
            self.controller._remove_interface, self.req, fakes.FAKE_UUID, {})
#!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Verifies that when the same value is repeated for a gyp define, duplicates are
stripped from the regeneration rule.
"""
import os
import TestGyp
# Regenerating build files when a gyp file changes is currently only supported
# by the make and Android generators.
test = TestGyp.TestGyp(formats=['make', 'android'])
# Same define given three times; the duplicates must be de-duplicated in the
# generated regeneration rule while keeping last-one-wins semantics.
os.environ['GYP_DEFINES'] = 'key=repeated_value key=value1 key=repeated_value'
test.run_gyp('defines.gyp')
test.build('defines.gyp')
# The last occurrence of a repeated set should take precedence over other
# values. See gyptest-multiple-values.py.
test.must_contain('action.txt', 'repeated_value')
# So the regeneration rule needs to use the correct order.
test.must_not_contain(
    'Makefile', '"-Dkey=repeated_value" "-Dkey=value1" "-Dkey=repeated_value"')
test.must_contain('Makefile', '"-Dkey=value1" "-Dkey=repeated_value"')
# Sleep so that the changed gyp file will have a newer timestamp than the
# previously generated build files.
test.sleep()
# Touch the gyp file to force regeneration on the next build.
os.utime("defines.gyp", None)
test.build('defines.gyp')
test.must_contain('action.txt', 'repeated_value')
test.pass_test()
# -*- coding: utf-8 -*-
# This file is part of emesene.
#
# emesene is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# emesene is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with emesene; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
"""Dummy Implementation of a Network Manager."""
import logging
log = logging.getLogger("emesene.e3.common.NetworkManagerHelper")
import extension
class DummyNetworkChecker():
    """No-op network checker.

    Fallback implementation used when no real network-availability
    backend is available: it accepts a session and monitors nothing.
    """
    # Public methods
    def set_new_session(self, session):
        """Ignore the session; this dummy checker performs no monitoring."""
        pass
    def stop(self):
        """Nothing to shut down for the dummy checker."""
        pass
# Register the dummy checker as the default 'network checker' extension.
extension.category_register('network checker', DummyNetworkChecker)
extension.set_default('network checker', DummyNetworkChecker)
<?php
namespace Illuminate\Http\Client;
use GuzzleHttp\Utils;
/**
 * A pool of asynchronous HTTP requests that share one Guzzle handler,
 * allowing them to be dispatched concurrently.
 *
 * @mixin \Illuminate\Http\Client\Factory
 */
class Pool
{
    /**
     * The factory instance.
     *
     * @var \Illuminate\Http\Client\Factory
     */
    protected $factory;
    /**
     * The handler function for the Guzzle client.
     *
     * @var callable
     */
    protected $handler;
    /**
     * The pool of requests.
     *
     * @var array<array-key, \Illuminate\Http\Client\PendingRequest>
     */
    protected $pool = [];
    /**
     * Create a new requests pool.
     *
     * @param  \Illuminate\Http\Client\Factory|null  $factory
     */
    public function __construct(?Factory $factory = null)
    {
        $this->factory = $factory ?: new Factory();
        // All pooled requests share one handler so they can run concurrently.
        $this->handler = Utils::chooseHandler();
    }
    /**
     * Add a request to the pool with a numeric index.
     *
     * @return \Illuminate\Http\Client\PendingRequest|\GuzzleHttp\Promise\Promise
     */
    public function newRequest()
    {
        return $this->pool[] = $this->asyncRequest();
    }
    /**
     * Add a request to the pool with a key.
     *
     * @param  string  $key
     * @return \Illuminate\Http\Client\PendingRequest
     */
    public function as(string $key)
    {
        // Keyed entries let callers retrieve individual responses by name.
        return $this->pool[$key] = $this->asyncRequest();
    }
    /**
     * Retrieve a new async pending request.
     *
     * @return \Illuminate\Http\Client\PendingRequest
     */
    protected function asyncRequest()
    {
        return $this->factory->setHandler($this->handler)->async();
    }
    /**
     * Retrieve the requests in the pool.
     *
     * @return array<array-key, \Illuminate\Http\Client\PendingRequest>
     */
    public function getRequests()
    {
        return $this->pool;
    }
    /**
     * Add a request to the pool with a numeric index and forward the method call to the request.
     *
     * @param  string  $method
     * @param  array  $parameters
     * @return \Illuminate\Http\Client\PendingRequest|\GuzzleHttp\Promise\Promise
     */
    public function __call($method, $parameters)
    {
        return $this->newRequest()->{$method}(...$parameters);
    }
}
# -*- coding: utf-8 -*-
# $Id: ja.py 4564 2006-05-21 20:44:42Z wiemann $
# Author: Hisashi Morita <hisashim@kt.rim.or.jp>
# Copyright: This module has been placed in the public domain.
# New language mappings are welcome. Before doing a new translation, please
# read <http://docutils.sf.net/docs/howto/i18n.html>. Two files must be
# translated for each language: one in docutils/languages, the other in
# docutils/parsers/rst/languages.
"""
Japanese-language mappings for language-dependent features of Docutils.
"""
__docformat__ = 'reStructuredText'
labels = {
    # fixed: language-dependent
    'author': u'著者',
    'authors': u'著者',
    'organization': u'組織',
    'address': u'住所',
    'contact': u'連絡先',
    'version': u'バージョン',
    'revision': u'リビジョン',
    'status': u'ステータス',
    'date': u'日付',
    'copyright': u'著作権',
    'dedication': u'献辞',
    'abstract': u'概要',
    'attention': u'注目!',
    'caution': u'注意!',
    'danger': u'!危険!',
    'error': u'エラー',
    'hint': u'ヒント',
    'important': u'重要',
    'note': u'備考',
    'tip': u'通報',
    'warning': u'警告',
    'contents': u'目次'}
"""Mapping of node class name to label text."""
bibliographic_fields = {
    # language-dependent: fixed
    u'著者': 'author',
    # NOTE(review): the ' n/a' key below looks like an untranslated
    # placeholder for the plural 'authors' field — confirm against the
    # upstream docutils source before changing it.
    u' n/a': 'authors',
    u'組織': 'organization',
    u'住所': 'address',
    u'連絡先': 'contact',
    u'バージョン': 'version',
    u'リビジョン': 'revision',
    u'ステータス': 'status',
    u'日付': 'date',
    u'著作権': 'copyright',
    u'献辞': 'dedication',
    u'概要': 'abstract'}
"""Japanese (lowcased) to canonical name mapping for bibliographic fields."""
author_separators = [';', ',']
"""List of separator strings for the 'Authors' bibliographic field. Tried in
order."""
"""Test context locators."""
from __future__ import absolute_import
from unittest import TestCase
import threading
from mock import sentinel
from eventtracking import locator
class TestThreadLocalContextLocator(TestCase):
    """Verify that ThreadLocalContextLocator keeps contexts isolated per thread."""
    def test_multithreaded_context(self):
        # Two threads each install their own context; the two Events below
        # force a deterministic interleaving so both threads observe only
        # their own context while the other's is also active.
        self.locator = locator.ThreadLocalContextLocator() # pylint: disable=attribute-defined-outside-init
        # Events emitted from the parent thread should have this context
        context = {sentinel.context_key: sentinel.context_value}
        # Events emitted from the child thread should have this context
        thread_context = {sentinel.thread_key: sentinel.thread_value}
        # Set once the child thread has setup its context
        thread_in_context = threading.Event()
        # Set once the parent has emitted its event
        parent_sent_event = threading.Event()
        def worker():
            """A simulated child thread"""
            # Setup a context in this thread. This should not inherit the parent thread context!
            self.locator.get()['child'] = thread_context
            # At this point both the parent and child threads have entered their contexts, but
            # the child thread should only "see" the context it setup.
            self.assertEquals(self.locator.get(), {'child': thread_context})
            # Notify the parent that the child has setup its context. At this point both the
            # parent and the child have entered their own contexts.
            thread_in_context.set()
            # Wait for the parent to emit its event
            parent_sent_event.wait()
            del self.locator.get()['child']
            self.assertEquals(self.locator.get(), {})
        self.locator.get()['parent'] = context
        other_thread = threading.Thread(target=worker)
        other_thread.start()
        # Wait for the thread to setup its context.
        thread_in_context.wait()
        # At this point both the parent and child threads have entered their contexts, but
        # the parent thread should only "see" the context it setup.
        self.assertEquals(self.locator.get(), {'parent': context})
        # Notify the thread that it can send its event and exit
        parent_sent_event.set()
        other_thread.join()
        del self.locator.get()['parent']
        self.assertEquals(self.locator.get(), {})
#!/usr/bin/env python
from nose.tools import *
from nose import SkipTest
import networkx as nx
from base_test import BaseTestAttributeMixing,BaseTestDegreeMixing
from networkx.algorithms.assortativity.correlation import attribute_ac
class TestDegreeMixingCorrelation(BaseTestDegreeMixing):
    """Tests for degree assortativity and Pearson degree correlation.

    Bug fix: the original class defined each of the three test-method names
    twice, so the second definitions (the Pearson variants) silently
    shadowed the first, and the ``degree_assortativity_coefficient`` tests
    never ran.  The Pearson tests are renamed so all six tests execute.
    """
    @classmethod
    def setupClass(cls):
        # numpy/scipy are optional; skip the whole class if unavailable.
        global np
        global npt
        try:
            import numpy as np
            import numpy.testing as npt
        except ImportError:
            raise SkipTest('NumPy not available.')
        try:
            import scipy
            import scipy.stats
        except ImportError:
            raise SkipTest('SciPy not available.')

    def test_degree_assortativity_undirected(self):
        r = nx.degree_assortativity_coefficient(self.P4)
        npt.assert_almost_equal(r, -1.0 / 2, decimal=4)

    def test_degree_assortativity_directed(self):
        r = nx.degree_assortativity_coefficient(self.D)
        npt.assert_almost_equal(r, -0.57735, decimal=4)

    def test_degree_assortativity_multigraph(self):
        r = nx.degree_assortativity_coefficient(self.M)
        npt.assert_almost_equal(r, -1.0 / 7.0, decimal=4)

    def test_degree_pearson_assortativity_undirected(self):
        r = nx.degree_pearson_correlation_coefficient(self.P4)
        npt.assert_almost_equal(r, -1.0 / 2, decimal=4)

    def test_degree_pearson_assortativity_directed(self):
        r = nx.degree_pearson_correlation_coefficient(self.D)
        npt.assert_almost_equal(r, -0.57735, decimal=4)

    def test_degree_pearson_assortativity_multigraph(self):
        r = nx.degree_pearson_correlation_coefficient(self.M)
        npt.assert_almost_equal(r, -1.0 / 7.0, decimal=4)
class TestAttributeMixingCorrelation(BaseTestAttributeMixing):
    """Checks attribute assortativity coefficients against hand-computed
    values and against mixing matrices from the literature."""

    @classmethod
    def setupClass(cls):
        # Make numpy and its testing helpers visible to every test in the
        # class; skip the class entirely when numpy is absent.
        global np
        global npt
        try:
            import numpy as np
            import numpy.testing as npt
        except ImportError:
            raise SkipTest('NumPy not available.')

    def test_attribute_assortativity_undirected(self):
        assert_equal(nx.attribute_assortativity_coefficient(self.G, 'fish'),
                     6.0 / 22.0)

    def test_attribute_assortativity_directed(self):
        assert_equal(nx.attribute_assortativity_coefficient(self.D, 'fish'),
                     1.0 / 3.0)

    def test_attribute_assortativity_multigraph(self):
        assert_equal(nx.attribute_assortativity_coefficient(self.M, 'fish'),
                     1.0)

    def test_attribute_assortativity_coefficient(self):
        # Mixing matrix taken from Newman, "Mixing patterns in networks".
        mixing = np.array([[0.258, 0.016, 0.035, 0.013],
                           [0.012, 0.157, 0.058, 0.019],
                           [0.013, 0.023, 0.306, 0.035],
                           [0.005, 0.007, 0.024, 0.016]])
        npt.assert_almost_equal(attribute_ac(mixing), 0.623, decimal=3)

    def test_attribute_assortativity_coefficient2(self):
        mixing = np.array([[0.18, 0.02, 0.01, 0.03],
                           [0.02, 0.20, 0.03, 0.02],
                           [0.01, 0.03, 0.16, 0.01],
                           [0.03, 0.02, 0.01, 0.22]])
        npt.assert_almost_equal(attribute_ac(mixing), 0.68, decimal=2)

    def test_attribute_assortativity(self):
        mixing = np.array([[50, 50, 0], [50, 50, 0], [0, 0, 2]])
        npt.assert_almost_equal(attribute_ac(mixing), 0.029, decimal=3)
import atexit
import glob
from inspect import isfunction, getargspec, ismethod
from itertools import islice
import os
import sys
def load_gremthon_jars():
    """Append every ``*.jar`` found under the colon-separated directories in
    the GREMTHON_JAR_DIRS environment variable to ``sys.path`` (skipping
    duplicates), so the Jython runtime can resolve the gremlin/blueprints
    classes imported below."""
    for jar_dir in os.environ.get('GREMTHON_JAR_DIRS', '').split(':'):
        # BUG FIX: an unset or empty variable produces '' entries whose glob
        # pattern ('*.jar') would pick up jars from the current working
        # directory; skip empty entries instead.
        if not jar_dir:
            continue
        for jar in glob.glob(os.path.join(jar_dir, '*.jar')):
            if jar not in sys.path:
                sys.path.append(jar)

load_gremthon_jars()
#gremlin related imports (Java)
from com.tinkerpop.blueprints import Direction, Predicate, Vertex, Edge
from com.tinkerpop.blueprints.impls.tg import TinkerGraphFactory
from com.tinkerpop.blueprints.util.io.graphson import GraphSONWriter, GraphSONReader
from com.tinkerpop.gremlin import Tokens
from com.tinkerpop.gremlin.java import GremlinPipeline
from com.tinkerpop.pipes.sideeffect import GroupByPipe
from com.tinkerpop.pipes import PipeFunction
from com.tinkerpop.pipes.util import PipesFunction
from com.tinkerpop.pipes.util import FluentUtility
from com.tinkerpop.pipes.util.structures import AsMap
from java.util import ArrayList, Collection, HashMap, Map
from java.lang import Float, String
from java.io import FileOutputStream, FileInputStream
class GremthonEdge(Edge):
    """Wraps a Blueprints Edge, exposing its properties through Python
    attribute/item access and delegating unknown attributes to a gremlin
    pipeline started at this edge."""

    def __init__(self, edge):
        self.edge = edge

    def __eq__(self, other):
        # BUG FIX: the previous implementation evaluated ``self == other``
        # inside __eq__, which recursed infinitely.  Compare the wrapped
        # edge instead, unwrapping the other operand when it is wrapped too.
        if self is other:
            return True
        if isinstance(other, GremthonEdge):
            return self.edge == other.edge
        return self.edge == other

    def __ne__(self, other):
        return not self.__eq__(other)

    def __cmp__(self, other):
        # BUG FIX: cmp() requires two operands; the old code passed only
        # self.edge.  (Python 2 / Jython only.)
        return cmp(self.edge, other.edge if isinstance(other, GremthonEdge) else other)

    def __hash__(self):
        return hash(self.edge)

    @property
    def properties(self):
        # Names of the properties stored on the underlying edge.
        return self.edge.propertyKeys

    def get_property(self, item):
        """Return the edge property named ``item``, falling back to a plain
        attribute of the wrapped edge; None when neither exists."""
        if item in self.edge.propertyKeys:
            return self.edge.getProperty(item)
        if hasattr(self.edge, item):
            return getattr(self.edge, item)
        return None

    def __getitem__(self, item):
        return self.get_property(item)

    def __getattr__(self, item):
        value = self.get_property(item)
        if value is None:
            # Fall back to pipeline steps (e.g. edge.out_v) so edges can be
            # used fluently as the start of a traversal.
            if hasattr(GremthonPipeline, item):
                pipeline = GremthonPipeline(GremlinPipeline(self.edge))
                pipeline_attr = getattr(pipeline, item)
                if ismethod(pipeline_attr):
                    return pipeline_attr
        return value

    def keys(self):
        return self.edge.getPropertyKeys()

    def values(self):
        values = []
        for key in self.edge.getPropertyKeys():
            values.append(self.edge.getProperty(key))
        return values

    @property
    def label(self):
        return self.edge.getLabel()

    @property
    def in_vertex(self):
        return self.edge.getVertex(Direction.IN)

    @property
    def out_vertex(self):
        return self.edge.getVertex(Direction.OUT)

    def __str__(self):
        return '{0}'.format(self.edge)

    def __repr__(self):
        return self.__str__()

    @property
    def id(self):
        return self.get_property('id')
class GremthonVertex(Vertex):
    """Wraps a Blueprints Vertex, exposing its properties through Python
    attribute/item access and delegating traversal steps to a gremlin
    pipeline rooted at this vertex."""

    def __init__(self, vertex):
        self.vertex = vertex

    def __eq__(self, other):
        # BUG FIX: the previous implementation evaluated ``self == other``
        # inside __eq__, which recursed infinitely.  Check identity first,
        # then compare the wrapped vertices.
        if self is other or self.vertex is other:
            return True
        if isinstance(other, GremthonVertex):
            return self.vertex == other.vertex
        return self.vertex == other

    def __ne__(self, other):
        return not self.__eq__(other)

    def __cmp__(self, other):
        # BUG FIX: cmp() requires two operands; the old code passed only
        # self.vertex.  (Python 2 / Jython only.)
        return cmp(self.vertex, other.vertex if isinstance(other, GremthonVertex) else other)

    def __hash__(self):
        return hash(self.vertex)

    @property
    def properties(self):
        # Names of the properties stored on the underlying vertex.
        return self.vertex.propertyKeys

    def get_property(self, item):
        """Return the vertex property named ``item``, falling back to a
        plain attribute of the wrapped vertex; None when neither exists."""
        if item in self.vertex.propertyKeys:
            return self.vertex.getProperty(item)
        if hasattr(self.vertex, item):
            return getattr(self.vertex, item)
        return None

    def __getitem__(self, item):
        return self.get_property(item)

    def __getattr__(self, item):
        value = self.get_property(item)
        if value is None:
            # Fall back to pipeline steps so vertices can start traversals.
            if hasattr(GremthonPipeline, item):
                pipeline = GremthonPipeline(GremlinPipeline(self.vertex))
                pipeline_attr = getattr(pipeline, item)
                if ismethod(pipeline_attr):
                    return pipeline_attr
        return value

    def __str__(self):
        return '{0}'.format(self.vertex)

    def __repr__(self):
        return self.__str__()

    def keys(self):
        return self.vertex.getPropertyKeys()

    def values(self):
        values = []
        for key in self.vertex.getPropertyKeys():
            values.append(self.vertex.getProperty(key))
        return values

    @property
    def id(self):
        return self.get_property('id')

    @property
    def out(self):
        return GremthonPipeline(self.vertex.out)

    @property
    def edges(self):
        # Generator over this vertex's incident edges, wrapped.
        for edge in self.vertex.getEdges():
            yield GremthonEdge(edge)
def map_gremthon_type(obj):
    """Translate a raw gremlin/Java result into its Python-friendly form:
    graph elements become gremthon wrappers, Java containers become builtin
    containers, anything else passes through unchanged."""
    if isinstance(obj, Edge):
        converted = GremthonEdge(obj)
    elif isinstance(obj, Vertex):
        converted = GremthonVertex(obj)
    elif isinstance(obj, HashMap):
        converted = dict(obj)
    elif isinstance(obj, ArrayList):
        converted = list(obj)
    else:
        converted = obj
    return converted
class GremthonPipesFunction(PipesFunction):
    """Adapts a plain Python callable to the PipesFunction interface.

    Arguments are converted with map_gremthon_type before the call; when the
    callable accepts a second parameter, the pipeline's AsMap is passed
    alongside it.
    """

    def __init__(self, function, as_map=None):
        self.function = function
        self.do_as_map = False
        self.as_map = as_map
        self.set_as_map(as_map)

    def set_as_map(self, as_map):
        self.as_map = as_map
        # Forward the AsMap only when there is a callable that takes it.
        self.do_as_map = (as_map is not None
                          and self.function is not None
                          and len(getargspec(self.function).args) > 1)

    # Java-style alias used by the pipes machinery.
    setAsMap = set_as_map

    def getAsMap(self):
        return self.as_map

    def compute(self, argument):
        if not self.function:
            return None
        mapped = map_gremthon_type(argument)
        if self.do_as_map:
            return self.function(mapped, self.as_map)
        return self.function(mapped)
class GremthonPredicate(Predicate):
    """Wraps a two-argument Python callable as a Blueprints Predicate so it
    can be handed to gremlin 'has' filters."""

    def __init__(self, function):
        self.function = function

    def evaluate(self, first, second):
        # Delegate straight to the wrapped callable.
        return self.function(first, second)
class GremthonPipeline(object):
    """Pythonic wrapper around a gremlin-java GremlinPipeline.

    Every traversal step returns a new GremthonPipeline wrapping the
    underlying pipeline, so steps can be chained fluently; results are
    converted with map_gremthon_type on iteration.  Step names that clash
    with Python keywords carry a trailing underscore (in_, and_, or_,
    except_, as_, property_).
    """

    def __init__(self, pipeline):
        self.pipeline = pipeline
        self.as_map = AsMap(self.pipeline)

    def __iter__(self):
        for item in self.pipeline:
            yield map_gremthon_type(item)

    def __getitem__(self, item):
        # Note: integer indexing returns a single-element islice iterator.
        if isinstance(item, int) and item >= 0:
            return islice(self, item, item + 1)
        elif isinstance(item, slice):
            return islice(self, item.start, item.stop, item.step)
        else:
            raise KeyError("Item must be non-negative number or slice (not {0})".format(item))

    def __getattr__(self, attribute):
        # Unknown attributes are resolved per-result (e.g. pipeline.name
        # collects the 'name' of every element in the pipeline).
        return [getattr(item, attribute, None) for item in self]

    def __repr__(self):
        return '\n'.join([str(map_gremthon_type(item)) for item in self.pipeline.toList()])

    def add(self, pipe):
        self.pipeline.addPipe(pipe)
        return self

    def both(self, *args):
        if len(args) > 1 and isinstance(args[0], int):
            return self.__class__(self.pipeline.both(args[0], args[1:]))
        else:
            return self.__class__(self.pipeline.both(args))

    def both_e(self, *args):
        if len(args) > 1 and isinstance(args[0], int):
            return self.__class__(self.pipeline.bothE(args[0], args[1:]))
        else:
            return self.__class__(self.pipeline.bothE(args))

    def both_v(self):
        return self.__class__(self.pipeline.bothV())

    def E(self, key=None, value=None):
        if key and value:
            return self.__class__(self.pipeline.E(key, value))
        else:
            return self.__class__(self.pipeline.E())

    def V(self, key=None, value=None):
        if key and value:
            return self.__class__(self.pipeline.V(key, value))
        else:
            return self.__class__(self.pipeline.V())

    def has(self, key=None, value=None, compare_token=None, predicate=None):
        """Filter by property; predicate may be a plain two-arg function or
        a Predicate, compare_token is the name of a Tokens.T member."""
        if predicate is not None:
            if isfunction(predicate):
                return self.__class__(self.pipeline.has(key, GremthonPredicate(predicate), value))
            elif hasattr(predicate, 'evaluate'):
                return self.__class__(self.pipeline.has(key, predicate, value))
            else:
                raise ValueError('Incorrect value for predicate. Must be function or implement Predicate interface')
        if compare_token is not None:
            return self.__class__(self.pipeline.has(key, getattr(Tokens.T, compare_token, None), value))
        if key is not None and value is None:
            return self.__class__(self.pipeline.has(key))
        else:
            return self.__class__(self.pipeline.has(key, value))

    def has_not(self, key):
        return self.__class__(self.pipeline.hasNot(key))

    def interval(self, key, start, end):
        # Python floats must be boxed as java.lang.Float for the Java API.
        if isinstance(start, float):
            start_value = Float(start)
        else:
            start_value = start
        if isinstance(end, float):
            end_value = Float(end)
        else:
            end_value = end
        return self.__class__(self.pipeline.interval(key, start_value, end_value))

    def id_edge(self, graph):
        return self.__class__(self.pipeline.idEdge(graph))

    def id_vertex(self, graph):
        return self.__class__(self.pipeline.idVertex(graph))

    def id(self):
        return self.__class__(self.pipeline.id())

    def in_e(self, *args):
        if len(args) > 1 and isinstance(args[0], int):
            return self.__class__(self.pipeline.inE(args[0], args[1:]))
        else:
            return self.__class__(self.pipeline.inE(args))

    def in_v(self):
        return self.__class__(self.pipeline.inV())

    def in_(self, *args):
        # 'in' is a Python keyword, so look the Java method up by name.
        in_method = getattr(self.pipeline, 'in')
        if len(args) > 1 and isinstance(args[0], int):
            return self.__class__(in_method(args[0], args[1:]))
        else:
            return self.__class__(in_method(args))

    def label(self):
        return self.__class__(self.pipeline.label())

    def out_e(self, *args):
        return self.__class__(self.pipeline.outE(args))

    def out(self, *args):
        if len(args) > 1 and isinstance(args[0], int):
            return self.__class__(self.pipeline.out(args[0], args[1:]))
        else:
            return self.__class__(self.pipeline.out(args))

    def out_v(self):
        return self.__class__(self.pipeline.outV())

    def map(self, *args):
        return self.__class__(self.pipeline.map(args))

    def property_(self, key):
        return self.__class__(self.pipeline.property(key))

    def step(self, func):
        return self.__class__(self.pipeline.step(func))

    def copy_split(self, *args):
        return self.__class__(self.pipeline.copySplit(args))

    def exhaust_merge(self):
        return self.__class__(self.pipeline.exhaustMerge())

    def fair_merge(self):
        return self.__class__(self.pipeline.fairMerge())

    def if_then_else(self, if_function, then_function, else_function):
        return self.__class__(self.pipeline.ifThenElse(if_function, then_function, else_function))

    def loop(self, step, while_function, emit_function=None):
        if emit_function:
            return self.__class__(self.pipeline.loop(step, while_function, emit_function))
        return self.__class__(self.pipeline.loop(step, while_function))

    def and_(self, *args):
        and_method = getattr(self.pipeline, 'and')
        return self.__class__(and_method(args))

    def back(self, named_step):
        return self.__class__(self.pipeline.back(named_step))

    def dedup(self, dedup_func=None):
        if dedup_func is None:
            return self.__class__(self.pipeline.dedup())
        else:
            return self.__class__(self.pipeline.dedup(dedup_func))

    def except_(self, *args):
        #TODO: debug except
        except_method = getattr(self.pipeline, 'except')
        if len(args) == 1 and isinstance(args[0], (Collection, list, tuple)):
            return self.__class__(except_method(ArrayList(args[0])))
        else:
            return self.__class__(except_method(args))

    def filter(self, filter_func):
        return self.__class__(self.pipeline.filter(GremthonPipesFunction(filter_func, as_map=self.as_map)))

    def or_(self, *args):
        or_method = getattr(self.pipeline, 'or')
        return self.__class__(or_method(args))

    def random(self, bias):
        return self.__class__(self.pipeline.random(bias))

    def range(self, low, high):
        return self.__class__(self.pipeline.range(low, high))

    def retain(self, *args):
        if len(args) == 1 and isinstance(args[0], (Collection, list, tuple)):
            return self.__class__(self.pipeline.retain(args[0]))
        else:
            return self.__class__(self.pipeline.retain(args))

    def simple_path(self):
        return self.__class__(self.pipeline.simplePath())

    def aggregate(self, aggregate=None, aggregate_func=None):
        """Greedily aggregate results into a collection; any plain function
        arguments are wrapped as GremthonPipesFunction."""
        if aggregate is None and aggregate_func is None:
            return self.__class__(self.pipeline.aggregate())
        elif aggregate and isinstance(aggregate, (Collection, list, tuple)) and aggregate_func is None:
            return self.__class__(self.pipeline.aggregate(aggregate))
        elif aggregate_func and aggregate is None:
            return self.__class__(self.pipeline.aggregate(GremthonPipesFunction(aggregate_func, as_map=self.as_map)))
        elif aggregate and not isinstance(aggregate, (Collection, list, tuple)) and aggregate_func is None:
            if isfunction(aggregate):
                return self.__class__(self.pipeline.aggregate(GremthonPipesFunction(aggregate, as_map=self.as_map)))
            else:
                return self.__class__(self.pipeline.aggregate(aggregate))
        else:
            return self.__class__(self.pipeline.aggregate(aggregate, GremthonPipesFunction(aggregate_func, as_map=self.as_map) if isfunction(aggregate_func) else aggregate_func))

    def optional(self, named_step):
        return self.__class__(self.pipeline.optional(named_step))

    def group_by(self, *args):
        """Group results; accepts (key, value), (map, key, value),
        (key, value, reduce) or (map, key, value, reduce)."""
        if len(args) == 2:
            key_func = GremthonPipesFunction(args[0], as_map=self.as_map) if isfunction(args[0]) else args[0]
            value_func = GremthonPipesFunction(args[1], as_map=self.as_map) if isfunction(args[1]) else args[1]
            return self.__class__(self.pipeline.add(GroupByPipe(FluentUtility.prepareFunction(self.as_map, key_func), FluentUtility.prepareFunction(self.as_map, value_func))))
        elif len(args) == 3:
            if isinstance(args[0], Map):
                map_obj = args[0]
                key_func = GremthonPipesFunction(args[1], as_map=self.as_map) if isfunction(args[1]) else args[1]
                value_func = GremthonPipesFunction(args[2], as_map=self.as_map) if isfunction(args[2]) else args[2]
                return self.__class__(self.pipeline.groupBy(map_obj, key_func, value_func))
            else:
                key_func = GremthonPipesFunction(args[0], as_map=self.as_map) if isfunction(args[0]) else args[0]
                value_func = GremthonPipesFunction(args[1], as_map=self.as_map) if isfunction(args[1]) else args[1]
                reduce_func = GremthonPipesFunction(args[2], as_map=self.as_map) if isfunction(args[2]) else args[2]
                return self.__class__(self.pipeline.groupBy(key_func, value_func, reduce_func))
        elif len(args) == 4:
            map_obj = args[0]
            key_func = GremthonPipesFunction(args[1], as_map=self.as_map) if isfunction(args[1]) else args[1]
            value_func = GremthonPipesFunction(args[2], as_map=self.as_map) if isfunction(args[2]) else args[2]
            reduce_func = GremthonPipesFunction(args[3], as_map=self.as_map) if isfunction(args[3]) else args[3]
            return self.__class__(self.pipeline.groupBy(map_obj, key_func, value_func, reduce_func))

    def group_count(self, *args):
        """Count results per key; accepts (), (key), (map), (map, key),
        (key, value) or (map, key, value)."""
        if len(args) == 0:
            return self.__class__(self.pipeline.groupCount())
        elif len(args) == 1:
            if isfunction(args[0]) or isinstance(args[0], PipeFunction):
                key_func = GremthonPipesFunction(args[0], as_map=self.as_map) if isfunction(args[0]) else args[0]
                return self.__class__(self.pipeline.groupCount(key_func))
            else:
                map = args[0]
                return self.__class__(self.pipeline.groupCount(map))
        elif len(args) == 2:
            if isinstance(args[0], (Map, dict)):
                map = args[0]
                key_func = GremthonPipesFunction(args[1], as_map=self.as_map) if isfunction(args[1]) else args[1]
                return self.__class__(self.pipeline.groupCount(map, key_func))
            else:
                key_func = GremthonPipesFunction(args[0], as_map=self.as_map) if isfunction(args[0]) else args[0]
                value_func = GremthonPipesFunction(args[1], as_map=self.as_map) if isfunction(args[1]) else args[1]
                return self.__class__(self.pipeline.groupCount(key_func, value_func))
        elif len(args) == 3:
            map = args[0]
            key_func = GremthonPipesFunction(args[1], as_map=self.as_map) if isfunction(args[1]) else args[1]
            value_func = GremthonPipesFunction(args[2], as_map=self.as_map) if isfunction(args[2]) else args[2]
            return self.__class__(self.pipeline.groupCount(map, key_func, value_func))

    def link_out(self, label, named_step_other_vertex):
        return self.__class__(self.pipeline.linkOut(label, named_step_other_vertex))

    def link_in(self, label, named_step_other_vertex):
        return self.__class__(self.pipeline.linkIn(label, named_step_other_vertex))

    def link_both(self, label, named_step_other_vertex):
        if isinstance(named_step_other_vertex, GremthonVertex):
            return self.__class__(self.pipeline.linkBoth(label, named_step_other_vertex.vertex))
        else:
            return self.__class__(self.pipeline.linkBoth(label, named_step_other_vertex))

    def side_effect(self, side_effect_func):
        return self.__class__(self.pipeline.sideEffect(GremthonPipesFunction(side_effect_func, as_map=self.as_map)))

    def store(self, *args):
        """Lazily store results; accepts (), (func), (storage) or
        (storage, func)."""
        if len(args) == 0:
            return self.__class__(self.pipeline.store())
        elif len(args) == 1:
            if isfunction(args[0]) or isinstance(args[0], PipeFunction):
                storage_func = GremthonPipesFunction(args[0], as_map=self.as_map) if isfunction(args[0]) else args[0]
                # BUG FIX: the storage function used to be built and then
                # silently dropped (store() was called without arguments);
                # pass it to the underlying store step.
                return self.__class__(self.pipeline.store(storage_func))
            else:
                storage = args[0]
                return self.__class__(self.pipeline.store(storage))
        elif len(args) == 2:
            storage = args[0]
            storage_func = GremthonPipesFunction(args[1], as_map=self.as_map) if isfunction(args[1]) else args[1]
            return self.__class__(self.pipeline.store(storage, storage_func))
        else:
            return None

    #TODO: table ?
    #TODO: tree ?
    def gather(self, gather_function=None):
        if gather_function is None:
            return self.__class__(self.pipeline.gather())
        else:
            return self.__class__(self.pipeline.gather(GremthonPipesFunction(gather_function, as_map=self.as_map)))

    def memoize(self, named_step, map=None):
        if map is None:
            return self.__class__(self.pipeline.memoize(named_step))
        else:
            return self.__class__(self.pipeline.memoize(named_step, map))

    def order(self):
        #TODO: handle other variations
        return self.__class__(self.pipeline.order())

    def path(self, *args):
        path_args = []
        for arg in args:
            if hasattr(arg, '__call__'):
                path_args.append(GremthonPipesFunction(arg, as_map=self.as_map))
            else:
                path_args.append(arg)
        return self.__class__(self.pipeline.path(tuple(path_args)))

    def scatter(self):
        return self.__class__(self.pipeline.scatter())

    def select(self, *args):
        path_args = []
        for arg in args:
            if hasattr(arg, '__call__'):
                path_args.append(GremthonPipesFunction(arg, as_map=self.as_map))
            else:
                path_args.append(arg)
        if len(path_args) >= 1 and isinstance(args[0], (Collection, list, tuple)):
            return self.__class__(self.pipeline.select(path_args[0], path_args[1:]))
        else:
            return self.__class__(self.pipeline.select(tuple(path_args)))

    def shuffle(self):
        return self.__class__(self.pipeline.shuffle())

    def cap(self):
        return self.__class__(self.pipeline.cap())

    #TODO: orderMap ?
    def transform(self, transform_func):
        return self.__class__(self.pipeline.transform(transform_func))

    def as_(self, name):
        as_method = getattr(self.pipeline, 'as')
        return self.__class__(as_method(name))

    def start(self, obj):
        return self.__class__(self.pipeline.start(obj))

    def count(self):
        return self.pipeline.count()

    def iterate(self):
        self.pipeline.iterate()

    def next(self, number=None):
        # BUG FIX: test against None so next(0) is not mistaken for next().
        if number is not None:
            return map_gremthon_type(self.pipeline.next(number))
        else:
            return map_gremthon_type(self.pipeline.next())

    def to_list(self):
        return self.pipeline.toList()

    def fill(self, collection):
        self.pipeline.fill(collection)
        return collection

    def enable_path(self):
        self.pipeline.enablePath()
        return self

    def optimize(self, optimize):
        self.pipeline.optimize(optimize)
        return self

    def remove(self):
        self.pipeline.remove()

    def _(self):
        return self.__class__(self.pipeline._())
class GremthonManagementSystem(object):
    """Pythonic wrapper around a graph management-system handle, usable as a
    context manager (changes are committed on exit)."""

    def __init__(self, management_system, default_cardinality=None):
        self.management_system = management_system
        # Cardinality applied to new property keys when none is supplied.
        self.default_cardinality = default_cardinality

    def __getitem__(self, path):
        return self.management_system.get(path)

    def __setitem__(self, key, value):
        return self.management_system.set(key, value)

    def __repr__(self):
        # BUG FIX: __repr__ must return a string; the raw handle was being
        # returned before.
        return str(self.management_system)

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        self.commit()

    @property
    def open(self):
        return self.management_system.isOpen()

    def close(self):
        self.management_system.close()

    def commit(self):
        self.management_system.commit()

    def rollback(self):
        self.management_system.rollback()

    def build_edge_index(self, label, name, keys=None, direction=Direction.BOTH, sort_order=None):
        """Build a vertex-centric edge index; sort_order is optional."""
        if sort_order is not None:
            eib = self.management_system.buildEdgeIndex(label, name, direction, sort_order, keys)
        else:
            eib = self.management_system.buildEdgeIndex(label, name, direction, keys)
        return eib

    def graph_index(self, name):
        return self.management_system.getGraphIndex(name)

    def contains_graph_index(self, name):
        return self.management_system.containsGraphIndex(name)

    def build_index(self, name, element_type, keys=None, backing_index=None, unique=False):
        """Build a graph index over element_type: composite by default, or a
        mixed index when a backing_index name is supplied."""
        if keys is None:
            keys = []
        index = self.management_system.buildIndex(name, element_type)
        for key in keys:
            index.addKey(key)
        if unique:
            index.unique()
        if backing_index:
            index.buildMixedIndex(backing_index)
        else:
            index.buildCompositeIndex()
        return index

    def contains_relation_type(self, name):
        return self.management_system.containsRelationType(name)

    def relation_type(self, name):
        return self.management_system.getRelationType(name)

    def contains_property_key(self, name):
        return self.management_system.containsPropertyKey(name)

    def property_key(self, name):
        return self.management_system.getPropertyKey(name)

    def contains_edge_label(self, name):
        return self.management_system.containsEdgeLabel(name)

    def edge_label(self, name):
        return self.management_system.getEdgeLabel(name)

    def make_property_key(self, name, data_type=None, cardinality=None, auto_make=True):
        """Create a property key (java.lang.String-typed by default); with
        auto_make=False the unfinished builder is returned instead."""
        pkm = self.management_system.makePropertyKey(name)
        if data_type:
            pkm.dataType(data_type)
        else:
            pkm.dataType(String)
        if cardinality:
            pkm.cardinality(cardinality)
        elif self.default_cardinality:
            pkm.cardinality(self.default_cardinality)
        if auto_make:
            return pkm.make()
        else:
            return pkm

    def make_edge_label(self, name, multiplicity=None, auto_make=True):
        elm = self.management_system.makeEdgeLabel(name)
        if multiplicity:
            elm.multiplicity(multiplicity)
        if auto_make:
            return elm.make()
        else:
            return elm

    def contains_vertex_label(self, name):
        return self.management_system.containsVertexLabel(name)

    def vertex_label(self, name):
        return self.management_system.getVertexLabel(name)

    def make_vertex_label(self, name, auto_make=True):
        vlm = self.management_system.makeVertexLabel(name)
        if auto_make:
            return vlm.make()
        else:
            return vlm

    def vertex_labels(self):
        return self.management_system.getVertexLabels()
class Gremthon(object):
    """Top-level facade over a Blueprints graph: element creation/removal,
    gremlin traversal entry points (E/V/e/v) and GraphSON import/export.
    Usable as a context manager; the graph is shut down on exit."""

    def __init__(self, graph):
        self.graph = graph
        # Guarantee shutdown even when the caller forgets to close the graph.
        atexit.register(self.graph.shutdown)

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        self.graph.shutdown()

    def __repr__(self):
        return self.graph.toString()

    def add_edge(self, index, out_v, in_v, label, **kwargs):
        """Create an edge; extra keyword arguments become edge properties."""
        if isinstance(out_v, GremthonVertex):
            out_v = out_v.vertex
        if isinstance(in_v, GremthonVertex):
            in_v = in_v.vertex
        edge = self.graph.addEdge(index, out_v, in_v, label)
        for key, value in kwargs.items():
            edge.setProperty(key, value)
        return map_gremthon_type(edge)

    def remove_edge(self, e):
        target = e.edge if isinstance(e, GremthonEdge) else e
        self.graph.removeEdge(target)

    def add_vertex(self, index=None, vertex_label=None, **kwargs):
        """Create a vertex (optionally labelled); extra keyword arguments
        become vertex properties."""
        if vertex_label:
            vertex = self.graph.addVertexWithLabel(vertex_label)
        else:
            vertex = self.graph.addVertex(index)
        for key, value in kwargs.items():
            vertex.setProperty(key, value)
        return map_gremthon_type(vertex)

    def remove_vertex(self, v):
        target = v.vertex if isinstance(v, GremthonVertex) else v
        self.graph.removeVertex(target)

    def commit(self):
        self.graph.commit()

    @property
    def E(self):
        return GremthonPipeline(GremlinPipeline(self.graph).E())

    def edges(self, key, value):
        return GremthonPipeline(GremlinPipeline(self.graph).E(key, value))

    @property
    def V(self):
        return GremthonPipeline(GremlinPipeline(self.graph).V())

    def vertices(self, key, value):
        return GremthonPipeline(GremlinPipeline(self.graph).V(key, value))

    def e(self, index):
        return GremthonPipeline(GremlinPipeline(self.graph.getEdge(index)))

    def v(self, index):
        return GremthonPipeline(GremlinPipeline(self.graph.getVertex(index)))

    @property
    def management_system(self):
        # Not every Blueprints implementation exposes a management system.
        try:
            return GremthonManagementSystem(self.graph.getManagementSystem())
        except AttributeError:
            return None

    def create_index(self, name, index_class, *args):
        return self.graph.createIndex(name, index_class, args)

    def idx(self, name):
        """Return the graph index named *name*, or None when absent."""
        matches = (index for index in self.graph.getIndices()
                   if index.getIndexName() == name)
        return next(matches, None)

    def input_graph(self, input_filename):
        GraphSONReader.inputGraph(self.graph, FileInputStream(input_filename))

    def output_graph(self, output_filename):
        GraphSONWriter.outputGraph(self.graph, FileOutputStream(output_filename))
# Smoke-test entry point: build the canonical TinkerPop toy graph and wrap
# it so it can be explored interactively (e.g. `jython -i gremthon.py`).
if __name__ == "__main__":
    graph = TinkerGraphFactory.createTinkerGraph()
    g = Gremthon(graph)
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2012, Boyd Adamson <boyd () boydadamson.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: svr4pkg
short_description: Manage Solaris SVR4 packages
description:
- Manages SVR4 packages on Solaris 10 and 11.
- These were the native packages on Solaris <= 10 and are available
as a legacy feature in Solaris 11.
- Note that this is a very basic packaging system. It will not enforce
dependencies on install or remove.
version_added: "0.9"
author: "Boyd Adamson (@brontitall)"
options:
name:
description:
- Package name, e.g. C(SUNWcsr)
required: true
state:
description:
- Whether to install (C(present)), or remove (C(absent)) a package.
- If the package is to be installed, then I(src) is required.
- The SVR4 package system doesn't provide an upgrade operation. You need to uninstall the old, then install the new package.
required: true
choices: ["present", "absent"]
src:
description:
- Specifies the location to install the package from. Required when C(state=present).
- "Can be any path acceptable to the C(pkgadd) command's C(-d) option. e.g.: C(somefile.pkg), C(/dir/with/pkgs), C(http:/server/mypkgs.pkg)."
- If using a file or directory, they must already be accessible by the host. See the M(copy) module for a way to get them there.
proxy:
description:
- HTTP[s] proxy to be used if C(src) is a URL.
response_file:
description:
- Specifies the location of a response file to be used if package expects input on install. (added in Ansible 1.4)
required: false
zone:
description:
- Whether to install the package only in the current zone, or install it into all zones.
- The installation into all zones works only if you are working with the global zone.
required: false
default: "all"
choices: ["current", "all"]
version_added: "1.6"
category:
description:
- Install/Remove category instead of a single package.
required: false
choices: ["true", "false"]
version_added: "1.6"
'''
EXAMPLES = '''
# Install a package from an already copied file
- svr4pkg:
name: CSWcommon
src: /tmp/cswpkgs.pkg
state: present
# Install a package directly from an http site
- svr4pkg:
name: CSWpkgutil
src: 'http://get.opencsw.org/now'
state: present
zone: current
# Install a package with a response file
- svr4pkg:
name: CSWggrep
src: /tmp/third-party.pkg
response_file: /tmp/ggrep.response
state: present
# Ensure that a package is not installed.
- svr4pkg:
name: SUNWgnome-sound-recorder
state: absent
# Ensure that a category is not installed.
- svr4pkg:
name: FIREFOX
state: absent
category: true
'''
import os
import tempfile
def package_installed(module, name, category):
    """Return True when the SVR4 package (or, with category=True, any
    package of that category) is present, by running ``pkginfo -q``.

    :param module: AnsibleModule instance used to locate and run pkginfo
    :param name: package (or category) name
    :param category: treat ``name`` as a category instead of a package
    """
    cmd = [module.get_bin_path('pkginfo', True)]
    cmd.append('-q')
    if category:
        cmd.append('-c')
    cmd.append(name)
    # BUG FIX: pass the argv list directly instead of joining it into a
    # string, so names containing spaces/shell metacharacters survive.
    rc, out, err = module.run_command(cmd)
    # pkginfo -q is silent and signals presence purely via its return code.
    return rc == 0
def create_admin_file():
    """Write a temporary pkgadd/pkgrm admin(4) file configured for fully
    non-interactive operation and return its path.

    Callers are responsible for unlinking the file when done.
    """
    (desc, filename) = tempfile.mkstemp(prefix='ansible_svr4pkg', text=True)
    fullauto = '''
mail=
instance=unique
partial=nocheck
runlevel=quit
idepend=nocheck
rdepend=nocheck
space=quit
setuid=nocheck
conflict=nocheck
action=nocheck
networktimeout=60
networkretries=3
authentication=quit
keystore=/var/sadm/security
proxy=
basedir=default
'''
    # BUG FIX / portability: os.write() requires bytes on Python 3; the
    # encode is a byte-for-byte no-op on Python 2.
    os.write(desc, fullauto.encode('ascii'))
    os.close(desc)
    return filename
def run_command(module, cmd):
    """Resolve cmd[0] to its full executable path via the module, then run
    the command, returning the module's (rc, stdout, stderr) tuple."""
    cmd[0] = module.get_bin_path(cmd[0], True)
    return module.run_command(cmd)
def package_install(module, name, src, proxy, response_file, zone, category):
    """Install an SVR4 package with pkgadd, using a fully non-interactive
    admin file; returns the (rc, stdout, stderr) of the pkgadd run."""
    adminfile = create_admin_file()
    cmd = ['pkgadd', '-n']
    if zone == 'current':
        cmd.append('-G')  # restrict the install to the current zone only
    cmd.extend(['-a', adminfile, '-d', src])
    if proxy is not None:
        cmd.extend(['-x', proxy])
    if response_file is not None:
        cmd.extend(['-r', response_file])
    if category:
        cmd.append('-Y')  # treat name as a category, not a package
    cmd.append(name)
    result = run_command(module, cmd)
    os.unlink(adminfile)
    return result
def package_uninstall(module, name, src, category):
    """Remove an SVR4 package (or, with category=True, a whole category)
    with pkgrm; returns (rc, stdout, stderr).  The src argument is unused
    but kept for signature symmetry with package_install."""
    adminfile = create_admin_file()
    if category:
        cmd = ['pkgrm', '-na', adminfile, '-Y', name]
    else:
        cmd = ['pkgrm', '-na', adminfile, name]
    result = run_command(module, cmd)
    os.unlink(adminfile)
    return result
def main():
    """Ansible entry point: converge the package (or category) named by the
    task arguments to the requested present/absent state."""
    module = AnsibleModule(
        argument_spec = dict(
            name = dict(required = True),
            state = dict(required = True, choices=['present', 'absent']),
            src = dict(default = None),
            proxy = dict(default = None),
            response_file = dict(default = None),
            zone = dict(required=False, default = 'all', choices=['current','all']),
            category = dict(default=False, type='bool')
        ),
        supports_check_mode=True
    )
    state = module.params['state']
    name = module.params['name']
    src = module.params['src']
    proxy = module.params['proxy']
    response_file = module.params['response_file']
    zone = module.params['zone']
    category = module.params['category']
    # rc stays None when the package is already in the desired state and no
    # pkgadd/pkgrm command runs; the result handling below relies on that.
    rc = None
    out = ''
    err = ''
    result = {}
    result['name'] = name
    result['state'] = state

    if state == 'present':
        if src is None:
            module.fail_json(name=name,
                             msg="src is required when state=present")
        if not package_installed(module, name, category):
            if module.check_mode:
                # Check mode: report the change without touching the system.
                module.exit_json(changed=True)
            (rc, out, err) = package_install(module, name, src, proxy, response_file, zone, category)
            # Stdout is normally empty but for some packages can be
            # very long and is not often useful
            if len(out) > 75:
                out = out[:75] + '...'

    elif state == 'absent':
        if package_installed(module, name, category):
            if module.check_mode:
                module.exit_json(changed=True)
            (rc, out, err) = package_uninstall(module, name, src, category)
            out = out[:75]

    # Returncodes as per pkgadd(1m)
    # 0  Successful completion
    # 1  Fatal error.
    # 2  Warning.
    # 3  Interruption.
    # 4  Administration.
    # 5  Administration. Interaction is required. Do not use pkgadd -n.
    # 10 Reboot after installation of all packages.
    # 20 Reboot after installation of this package.
    # 99 (observed) pkgadd: ERROR: could not process datastream from </tmp/pkgutil.pkg>
    if rc in (0, 2, 3, 10, 20):
        result['changed'] = True
    # no install nor uninstall, or failed
    else:
        result['changed'] = False

    # rc will be none when the package already was installed and no action took place
    # Only return failed=False when the returncode is known to be good as there may be more
    # undocumented failure return codes
    if rc not in (None, 0, 2, 10, 20):
        result['failed'] = True
    else:
        result['failed'] = False

    if out:
        result['stdout'] = out
    if err:
        result['stderr'] = err

    module.exit_json(**result)
# import module snippets
from ansible.module_utils.basic import *
# Ansible modules run main() when executed directly by the module runner.
if __name__ == '__main__':
    main()
# -*- coding: utf-8 -*-
##############################################################################
#
# Copyright 2012 Camptocamp SA (Guewen Baconnier)
# Copyright (C) 2010 Sébastien Beau
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from . import easy_reconcile
from . import base_reconciliation
from . import simple_reconciliation
from . import easy_reconcile_history
from . import res_config | unknown | codeparrot/codeparrot-clean | ||
#
# Copyright (c) 2013 Juniper Networks, Inc. All rights reserved.
#
import os
import re
import sys

from ifmap_global import CamelCase
def getLinkInfoType(ident, link_info):
    """Return the Java type name carried on a link's attribute.

    Links whose metadata declares an XSD type map to that type's generated
    class name; attribute-less links fall back to 'ApiPropertyBase'.
    """
    link = ident.getLink(link_info)
    return link.getCType().getName() if link.getXsdType() else 'ApiPropertyBase'
class JavaApiGenerator(object):
    """Emits Java source (package net.juniper.contrail.api.types) from the
    parsed IF-MAP schema: one class per identifier plus standalone classes
    for each property type recorded in _top_level_map.

    NOTE: this module is Python 2 (uses print statements).
    """

    def __init__(self, parser, type_map, identifiers, metadata):
        self._parser = parser                  # backend providing makeFile()
        self._type_map = type_map              # type name -> ctype for all complex types
        # Types that get their own top-level .java file; seeded with
        # SubnetType, extended as identifier properties are discovered.
        self._top_level_map = {
            'SubnetType': self._type_map['SubnetType']
        }
        self._identifier_map = identifiers     # identifier name -> identifier
        self._metadata_map = metadata
        # ctype -> number of times it was generated; duplicates are reported.
        self._type_count = {}

    def _FileWrite(self, file, multiline, indent_level):
        """Write every line of 'multiline' to 'file', left-padded with
        'indent_level' spaces."""
        lines = multiline.split('\n')
        for line in lines:
            line = ' ' * indent_level + line + '\n'
            file.write(line)
    #end _FileWrite

    def _GenerateTypeClass(self, ctype, filename):
        """Create 'filename' holding a top-level class for property
        type 'ctype'."""
        file = self._parser.makeFile(filename)
        header = """//
// Automatically generated.
//
package net.juniper.contrail.api.types;
import java.util.List;
import java.util.ArrayList;
import net.juniper.contrail.api.ApiPropertyBase;
"""
        file.write(header)
        self._GenerateType(ctype, file, 0, {})

    def _GenerateType(self, ctype, file, indent_level, inner_map):
        """Recursively emit 'ctype': fields, constructors and accessors.

        Dependent types are emitted first, as static inner classes.
        'inner_map' tracks types already emitted in this file to avoid
        duplicates; types in _top_level_map are skipped when nested since
        they get their own file.
        """
        if inner_map.get(ctype.getName()):
            return
        inner_map[ctype.getName()] = ctype
        if indent_level and self._top_level_map.get(ctype.getName()):
            return
        # Track generation count for the duplicate report in Generate().
        count = self._type_count.get(ctype)
        if count:
            self._type_count[ctype] = count + 1
        else:
            self._type_count[ctype] = 1
        if indent_level:
            file.write(' ' * indent_level)
        file.write('public ')
        if indent_level:
            file.write('static ')
        file.write('class %s ' % ctype.getName())
        # Only top-level property classes extend ApiPropertyBase.
        if indent_level == 0:
            file.write('extends ApiPropertyBase ')
        file.write('{\n')
        indent_level += 4
        for dep in ctype.getDependentTypes():
            self._GenerateType(dep, file, indent_level, inner_map)
        for member in ctype.getDataMembers():
            file.write(' ' * indent_level)
            file.write('%s %s;\n' % (member.jtypename, member.membername))
        # default constructor
        file.write(' ' * indent_level)
        file.write('public %s() {\n' % ctype.getName())
        file.write(' ' * indent_level)
        file.write('}\n')
        # constructor with all properties
        file.write(' ' * indent_level)
        file.write('public %s(' % ctype.getName())
        index = 0
        for member in ctype.getDataMembers():
            if index > 0:
                file.write(', ')
            file.write('%s %s' % (member.jtypename, member.membername))
            index += 1
        file.write(') {\n')
        indent_level += 4
        for member in ctype.getDataMembers():
            file.write(' ' * indent_level)
            file.write('this.%s = %s;\n' %
                       (member.membername, member.membername))
        indent_level -= 4
        file.write(' ' * indent_level)
        file.write('}\n')
        self._GenerateTypePropertyAccessors(file, ctype, indent_level);
        self._GenerateTypePropertyConvinience(file, ctype, indent_level)
        indent_level -= 4
        # Inner classes are indented; the top-level class closes at column 0.
        if indent_level > 0:
            file.write(' ' * indent_level)
        file.write('}\n')
    # _GenerateType

    def _InnerPropertyArgument(self, inner, member):
        """Java parameter declaration for 'member'; complex non-top-level
        types are qualified with the inner class name."""
        decl = ''
        if member.isComplex and not self._top_level_map.get(member.jtypename):
            decl = inner.getName() + '.'
        decl += member.jtypename
        decl += ' ' + member.membername
        return decl

    def _GenerateTypePropertyAccessors(self, file, ctype, indent_level):
        """Emit getter/setter pairs for all scalar (non-sequence) members."""
        for prop in ctype.getDataMembers():
            if prop.isSequence:
                continue
            decl = """
public %(type)s get%(caml)s() {
    return %(field)s;
}

public void set%(caml)s(%(type)s %(field)s) {
    this.%(field)s = %(field)s;
}
""" % {'caml': CamelCase(prop.membername), 'type': prop.jtypename,
       'field': prop.membername}
            self._FileWrite(file, decl, indent_level)
    # _GenerateTypePropertyAccessors

    def _GenerateTypePropertyConvinience(self, file, ctype, indent_level):
        """Emit list accessors (get/add/clear) for sequence members, plus an
        add() overload taking the element type's constructor arguments for
        small element types (<= 4 members)."""
        for member in ctype.getDataMembers():
            if member.isSequence:
                # Element type is extracted from e.g. List<Foo>.
                m = re.search(r'\<(.*)\>', member.jtypename)
                if m:
                    innertype = m.group(1)
                else:
                    print 'Unable to determine inner type for Collection: ' + member.jtypename
                    continue
                methodname = CamelCase(member.membername)
                decl = """
public List<%(typename)s> get%(caml)s() {
    return %(field)s;
}
""" % { 'caml': methodname, 'typename': innertype, 'field': member.membername }
                self._FileWrite(file, decl, indent_level)

                # addFooList() reads awkwardly; drop the trailing 'List'.
                if methodname.endswith('List'):
                    methodname = methodname[:-len('List')]

                decl = """
public void add%(caml)s(%(typename)s obj) {
    if (%(field)s == null) {
        %(field)s = new ArrayList<%(typename)s>();
    }
    %(field)s.add(obj);
}

public void clear%(caml)s() {
    %(field)s = null;
}
""" % {'caml': methodname, 'typename': innertype, 'field': member.membername}
                self._FileWrite(file, decl, indent_level)

                # convinience method that uses the inner type constructor
                # arguments
                inner = self._type_map.get(innertype)
                if not inner or len(inner.getDataMembers()) > 4:
                    continue
                decl = """
public void add%(caml)s(%(argdecl)s) {
    if (%(field)s == null) {
        %(field)s = new ArrayList<%(typename)s>();
    }
    %(field)s.add(new %(typename)s(%(arglist)s));
}
""" % {'caml': methodname, 'typename': innertype, 'field': member.membername,
       'argdecl': ', '.join(
           map(lambda x: self._InnerPropertyArgument(inner, x),
               inner.getDataMembers())),
       'arglist': ', '.join(
           map(lambda x: x.membername, inner.getDataMembers()))
       }
                self._FileWrite(file, decl, indent_level)
    # _GenerateTypePropertyConvinience

    def _GenerateClass(self, ident, filename):
        """Create 'filename' with the ApiObjectBase subclass for identifier
        'ident': fields for properties, refs, children and back-refs,
        followed by their accessors."""
        file = self._parser.makeFile(filename)
        header = """//
// Automatically generated.
//
package net.juniper.contrail.api.types;
import java.util.List;
import java.util.ArrayList;
import com.google.common.collect.ImmutableList;
import net.juniper.contrail.api.ApiObjectBase;
import net.juniper.contrail.api.ApiPropertyBase;
import net.juniper.contrail.api.ObjectReference;
public class %(cls)s extends ApiObjectBase {
""" % {'cls': ident.getCppName() }
        file.write(header)
        for prop in ident.getProperties():
            # id-perms is handled by the base class.
            if prop.getName() == 'id-perms':
                continue
            decl = '    private %s %s;\n' % (prop.getJavaTypename(), prop.getCIdentifierName())
            file.write(decl)
            ctype = prop.getCType()
            if ctype:
                # Complex property types become their own top-level files.
                ctypename = ctype.getName()
                self._top_level_map[ctypename] = self._type_map[ctypename]
        for link_info in ident.getLinksInfo():
            link_type = getLinkInfoType(ident, link_info)
            if ident.isLinkRef(link_info):
                link_to = ident.getLinkTo(link_info)
                decl = '    private List<ObjectReference<%s>> %s_refs;\n' % (link_type, link_to.getCIdentifierName())
                file.write(decl)
            elif ident.isLinkHas(link_info):
                child = ident.getLinkTo(link_info)
                decl = '    private List<ObjectReference<%s>> %ss;\n' % (link_type, child.getCIdentifierName())
                file.write(decl)
        for back_link in ident.getBackLinksInfo():
            link_from = ident.getBackLinkFrom(back_link)
            link_type = getLinkInfoType(ident, back_link)
            # transient: back-refs are server-computed and not serialized.
            decl = '    private transient List<ObjectReference<%s>> %s_back_refs;\n' % (link_type, link_from.getCIdentifierName())
            file.write(decl)
        self._GenerateTypename(file, ident)
        self._GenerateDefaultParent(file, ident)
        self._GenerateDefaultParentType(file, ident)
        self._GeneratePropertyAccessors(file, ident, 4)
        for link_info in ident.getLinksInfo():
            if ident.isLinkRef(link_info):
                self._GenerateLinkRefAccessors(file, ident, link_info)
            elif ident.isLinkHas(link_info):
                self._GenerateLinkHasAccessors(file, ident, link_info)
        for back_link in ident.getBackLinksInfo():
            self._GenerateBackRefAccessors(file, ident, back_link)
        file.write('}')

    def _GenerateTypename(self, file, ident):
        """Emit the getType() override returning the IF-MAP type name."""
        decl = """
    @Override
    public String getType() {
        return "%s";
    }
""" % ident.getName()
        file.write(decl)
    # _GenerateTypename

    def _GenerateDefaultParent(self, file, ident):
        """Emit getDefaultParent(); the FQ name of the first parent, or an
        empty list when the identifier has no parents."""
        fq_name = ''
        parents = ident.getParents()
        if parents:
            (parent, meta) = parents[0]
            quoted_list = map(lambda x: '"%s"' % x, parent.getDefaultFQName())
            fq_name = ', '.join(quoted_list)
        decl = """
    @Override
    public List<String> getDefaultParent() {
        return ImmutableList.of(%s);
    }
""" % fq_name
        file.write(decl)
    # _GenerateDefaultParent

    def _GenerateDefaultParentType(self, file, ident):
        """Emit getDefaultParentType(); null for parentless identifiers."""
        def quote(s):
            return '"' + s + '"'
        typename = 'null';
        parents = ident.getParents()
        if parents:
            (parent, meta) = parents[0]
            typename = quote(parent.getName())
        decl = """
    @Override
    public String getDefaultParentType() {
        return %s;
    }
""" % typename
        file.write(decl)
    # _GenerateDefaultParentType

    def _GeneratePropertyAccessors(self, file, ident, indent_level):
        """Emit getter/setter pairs for the identifier's properties."""
        for prop in ident.getProperties():
            if prop.getName() == 'id-perms':
                continue
            gsname = prop.getCppName()
            # Drop the redundant class-name prefix from accessor names.
            if gsname.startswith(ident.getCppName()):
                gsname = gsname[len(ident.getCppName()):]
            decl = """
public %(type)s get%(caml)s() {
    return %(field)s;
}

public void set%(caml)s(%(type)s %(field)s) {
    this.%(field)s = %(field)s;
}
""" % {'caml': gsname, 'type': prop.getJavaTypename(),
       'field': prop.getCIdentifierName()}
            self._FileWrite(file, decl, indent_level)
    # _GeneratePropertyAccessors

    def _GenerateLinkRefAccessors(self, file, ident, link_info):
        """Emit accessors for a reference link; attribute-bearing links get
        (obj, data) signatures, plain links get obj-only signatures."""
        link_to = ident.getLinkTo(link_info)
        getter = """
    public List<ObjectReference<%(attrtype)s>> get%(caml)s() {
        return %(id)s_refs;
    }
""" % {'attrtype': getLinkInfoType(ident, link_info), 'caml': link_to.getCppName(), 'id': link_to.getCIdentifierName() }
        file.write(getter)

        xlink = ident.getLink(link_info)
        if xlink.getXsdType():
            attrtype = xlink.getCType().getName()
            # Attribute type must be generated as a top-level class.
            self._top_level_map[attrtype] = self._type_map[attrtype]
            setters = """
    public void set%(caml)s(%(linktype)s obj, %(datatype)s data) {
        %(field)s_refs = new ArrayList<ObjectReference<%(datatype)s>>();
        %(field)s_refs.add(new ObjectReference<%(datatype)s>(obj.getQualifiedName(), data));
    }

    public void add%(caml)s(%(linktype)s obj, %(datatype)s data) {
        if (%(field)s_refs == null) {
            %(field)s_refs = new ArrayList<ObjectReference<%(datatype)s>>();
        }
        %(field)s_refs.add(new ObjectReference<%(datatype)s>(obj.getQualifiedName(), data));
    }

    public void remove%(caml)s(%(linktype)s obj, %(datatype)s data) {
        if (%(field)s_refs != null) {
            %(field)s_refs.remove(new ObjectReference<%(datatype)s>(obj.getQualifiedName(), data));
        }
    }

    public void clear%(caml)s() {
        if (%(field)s_refs != null) {
            %(field)s_refs.clear();
            return;
        }
        %(field)s_refs = null;
    }
""" % {'caml': link_to.getCppName(), 'linktype': link_to.getCppName(),
       'datatype': attrtype, 'field': link_to.getCIdentifierName()}
            file.write(setters)
        else:
            setters = """
    public void set%(caml)s(%(linktype)s obj) {
        %(field)s_refs = new ArrayList<ObjectReference<ApiPropertyBase>>();
        %(field)s_refs.add(new ObjectReference<ApiPropertyBase>(obj.getQualifiedName(), null));
    }

    public void add%(caml)s(%(linktype)s obj) {
        if (%(field)s_refs == null) {
            %(field)s_refs = new ArrayList<ObjectReference<ApiPropertyBase>>();
        }
        %(field)s_refs.add(new ObjectReference<ApiPropertyBase>(obj.getQualifiedName(), null));
    }

    public void clear%(caml)s() {
        %(field)s_refs = null;
    }
""" % {'caml': link_to.getCppName(), 'linktype': link_to.getCppName(),
       'field': link_to.getCIdentifierName()}
            file.write(setters)
    # _GenerateLinkRefAccessors

    def _GenerateLinkHasAccessors(self, file, ident, link_info):
        """Emit the (read-only) accessor for a parent-child link."""
        child = ident.getLinkTo(link_info)
        getter = """
    public List<ObjectReference<%(attrtype)s>> get%(caml)ss() {
        return %(id)ss;
    }
""" % {'attrtype': getLinkInfoType(ident, link_info), 'caml': child.getCppName(), 'id': child.getCIdentifierName() }
        file.write(getter)
    # _GenerateLinkHasAccessors

    def _GenerateBackRefAccessors(self, file, ident, back_link):
        """Emit the (read-only) accessor for a back-reference."""
        link_from = ident.getBackLinkFrom(back_link)
        decl = """
    public List<ObjectReference<%(attrtype)s>> get%(caml)sBackRefs() {
        return %(field)s_back_refs;
    }
""" % {'attrtype': getLinkInfoType(ident, back_link), 'caml': link_from.getCppName(), 'field': link_from.getCIdentifierName()}
        file.write(decl)
    # _GenerateBackRefAccessors

    def Generate(self, dirname):
        """Generate all identifier classes, then all top-level property
        type classes, into directory 'dirname' (created if missing)."""
        if not os.path.exists(dirname):
            os.makedirs(dirname)
        elif not os.path.isdir(dirname):
            print "-o option must specify directory"
            # Requires 'import sys' at module scope for this exit path.
            sys.exit(1)
        for ident in self._identifier_map.values():
            filename = os.path.join(dirname, ident.getCppName() + ".java")
            self._GenerateClass(ident, filename)
        # _GenerateClass extends _top_level_map as a side effect, so this
        # loop must run after all identifier classes are generated.
        for ctype in self._top_level_map.values():
            filename = os.path.join(dirname, ctype.getName() + ".java")
            self._GenerateTypeClass(ctype, filename)
        for cname, count in self._type_count.items():
            if count > 1:
                print 'type %s count: %d' % (cname.getName(), count)
#!/usr/bin/env python
# Copyright 2017 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Generates GEN_JNI.java (or N.java) and helper for manual JNI registration.
Creates a header file with two static functions: RegisterMainDexNatives() and
RegisterNonMainDexNatives(). Together, these will use manual JNI registration
to register all native methods that exist within an application."""
import argparse
import functools
import multiprocessing
import os
import string
import sys
import zipfile
import jni_generator
from util import build_utils
# All but FULL_CLASS_NAME, which is used only for sorting.
# Values under these keys are string-concatenated across all per-file
# dicts by _Generate() to form the combined registration dict.
MERGEABLE_KEYS = [
    'CLASS_PATH_DECLARATIONS',
    'FORWARD_DECLARATIONS',
    'JNI_NATIVE_METHOD',
    'JNI_NATIVE_METHOD_ARRAY',
    'PROXY_NATIVE_SIGNATURES',
    'PROXY_NATIVE_METHOD_ARRAY',
    'PROXY_NATIVE_METHOD_ARRAY_MAIN_DEX',
    'REGISTER_MAIN_DEX_NATIVES',
    'REGISTER_NON_MAIN_DEX_NATIVES',
]
def _Generate(java_file_paths,
              srcjar_path,
              proxy_opts,
              header_path=None,
              namespace=''):
  """Generates files required to perform JNI registration.

  Generates a srcjar containing a single class, GEN_JNI, that contains all
  native method declarations.

  Optionally generates a header file that provides functions
  (RegisterMainDexNatives and RegisterNonMainDexNatives) to perform
  JNI registration.

  Args:
    java_file_paths: A list of java file paths.
    srcjar_path: Path to the GEN_JNI srcjar.
    proxy_opts: ProxyOptions controlling proxy-native generation.
    header_path: If specified, generates a header file in this location.
    namespace: If specified, sets the namespace for the generated header file.
  """
  # Without multiprocessing, script takes ~13 seconds for chrome_public_apk
  # on a z620. With multiprocessing, takes ~2 seconds.
  pool = multiprocessing.Pool()
  results = []
  for d in pool.imap_unordered(
      functools.partial(_DictForPath, use_proxy_hash=proxy_opts.use_hash),
      java_file_paths):
    # _DictForPath returns None for files without natives; drop those.
    if d:
      results.append(d)
  pool.close()
  # Sort to make output deterministic.
  results.sort(key=lambda d: d['FULL_CLASS_NAME'])
  # Concatenate each mergeable section across all per-file dicts.
  combined_dict = {}
  for key in MERGEABLE_KEYS:
    combined_dict[key] = ''.join(d.get(key, '') for d in results)
  if header_path:
    combined_dict['HEADER_GUARD'] = \
        os.path.splitext(header_path)[0].replace('/', '_').upper() + '_'
    combined_dict['NAMESPACE'] = namespace
    header_content = CreateFromDict(combined_dict, proxy_opts.use_hash)
    with build_utils.AtomicOutput(header_path, mode='w') as f:
      f.write(header_content)
  with build_utils.AtomicOutput(srcjar_path) as f:
    with zipfile.ZipFile(f, 'w') as srcjar:
      build_utils.AddToZipHermetic(
          srcjar,
          '%s.java' % jni_generator.ProxyHelpers.GetQualifiedClass(
              proxy_opts.use_hash),
          data=CreateProxyJavaFromDict(combined_dict, proxy_opts))
def _DictForPath(path, use_proxy_hash=False):
  """Parses one Java file and returns its registration dict.

  Returns None when the file is annotated @JniIgnoreNatives or declares
  no native methods (classic or proxy).
  """
  with open(path) as f:
    contents = jni_generator.RemoveComments(f.read())
    if '@JniIgnoreNatives' in contents:
      return None
  fully_qualified_class = jni_generator.ExtractFullyQualifiedJavaClassName(
      path, contents)
  # Classic 'native' methods plus @NativeMethods-interface proxy natives.
  natives = jni_generator.ExtractNatives(contents, 'long')
  natives += jni_generator.ProxyHelpers.ExtractStaticProxyNatives(
      fully_qualified_class=fully_qualified_class,
      contents=contents,
      ptr_type='long',
      use_hash=use_proxy_hash)
  if len(natives) == 0:
    return None
  namespace = jni_generator.ExtractJNINamespace(contents)
  jni_params = jni_generator.JniParams(fully_qualified_class)
  jni_params.ExtractImportsAndInnerClasses(contents)
  is_main_dex = jni_generator.IsMainDexJavaClass(contents)
  header_generator = HeaderGenerator(namespace, fully_qualified_class, natives,
                                     jni_params, is_main_dex, use_proxy_hash)
  return header_generator.Generate()
def _SetProxyRegistrationFields(registration_dict, use_hash):
  """Fills in the proxy-registration sections of 'registration_dict'.

  Sets PROXY_NATIVE_METHOD_ARRAY, REGISTER_PROXY_NATIVES and
  REGISTER_MAIN_DEX_PROXY_NATIVES from the proxy kMethods collected per
  file, emitting one registration function per dex (main / non-main).
  """
  registration_template = string.Template("""\
static const JNINativeMethod kMethods_${ESCAPED_PROXY_CLASS}[] = {
${KMETHODS}
};

namespace {

JNI_REGISTRATION_EXPORT bool ${REGISTRATION_NAME}(JNIEnv* env) {
  const int number_of_methods = base::size(kMethods_${ESCAPED_PROXY_CLASS});

  base::android::ScopedJavaLocalRef<jclass> native_clazz =
      base::android::GetClass(env, "${PROXY_CLASS}");
  if (env->RegisterNatives(
      native_clazz.obj(),
      kMethods_${ESCAPED_PROXY_CLASS},
      number_of_methods) < 0) {
    jni_generator::HandleRegistrationError(env, native_clazz.obj(), __FILE__);
    return false;
  }

  return true;
}

}  // namespace
""")

  registration_call = string.Template("""\

  // Register natives in a proxy.
  if (!${REGISTRATION_NAME}(env)) {
    return false;
  }
""")

  sub_dict = {
      'ESCAPED_PROXY_CLASS':
      jni_generator.EscapeClassName(
          jni_generator.ProxyHelpers.GetQualifiedClass(use_hash)),
      'PROXY_CLASS':
      jni_generator.ProxyHelpers.GetQualifiedClass(use_hash),
      'KMETHODS':
      registration_dict['PROXY_NATIVE_METHOD_ARRAY'],
      'REGISTRATION_NAME':
      jni_generator.GetRegistrationFunctionName(
          jni_generator.ProxyHelpers.GetQualifiedClass(use_hash)),
  }

  # Only emit the array/registration when there is at least one proxy native.
  if registration_dict['PROXY_NATIVE_METHOD_ARRAY']:
    proxy_native_array = registration_template.substitute(sub_dict)
    proxy_natives_registration = registration_call.substitute(sub_dict)
  else:
    proxy_native_array = ''
    proxy_natives_registration = ''

  # Main-dex proxies get a second, suffixed array and registration function.
  if registration_dict['PROXY_NATIVE_METHOD_ARRAY_MAIN_DEX']:
    sub_dict['REGISTRATION_NAME'] += 'MAIN_DEX'
    sub_dict['ESCAPED_PROXY_CLASS'] += 'MAIN_DEX'
    sub_dict['KMETHODS'] = (
        registration_dict['PROXY_NATIVE_METHOD_ARRAY_MAIN_DEX'])
    proxy_native_array += registration_template.substitute(sub_dict)
    main_dex_call = registration_call.substitute(sub_dict)
  else:
    main_dex_call = ''

  registration_dict['PROXY_NATIVE_METHOD_ARRAY'] = proxy_native_array
  registration_dict['REGISTER_PROXY_NATIVES'] = proxy_natives_registration
  registration_dict['REGISTER_MAIN_DEX_PROXY_NATIVES'] = main_dex_call
def CreateProxyJavaFromDict(registration_dict, proxy_opts):
  """Returns the Java source for GEN_JNI (or J/N when hashing is enabled),
  declaring every proxy native signature collected across all files."""
  template = string.Template("""\
// Copyright 2018 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

package ${PACKAGE};

// This file is autogenerated by
//     base/android/jni_generator/jni_registration_generator.py
// Please do not change its content.

public class ${CLASS_NAME} {
  public static final boolean TESTING_ENABLED = ${TESTING_ENABLED};
  public static final boolean REQUIRE_MOCK = ${REQUIRE_MOCK};
${SIGNATURES}
}
""")

  return template.substitute({
      'TESTING_ENABLED':
      str(proxy_opts.enable_mocks).lower(),
      'REQUIRE_MOCK':
      str(proxy_opts.require_mocks).lower(),
      'CLASS_NAME':
      jni_generator.ProxyHelpers.GetClass(proxy_opts.use_hash),
      'PACKAGE':
      jni_generator.ProxyHelpers.GetPackage(proxy_opts.use_hash).replace(
          '/', '.'),
      'SIGNATURES':
      registration_dict['PROXY_NATIVE_SIGNATURES']
  })
def CreateFromDict(registration_dict, use_hash):
  """Returns the content of the header file.

  Returns '' when there are no forward declarations, i.e. no natives at
  all were found across the input files.
  """
  template = string.Template("""\
// Copyright 2017 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

// This file is autogenerated by
//     base/android/jni_generator/jni_registration_generator.py
// Please do not change its content.

#ifndef ${HEADER_GUARD}
#define ${HEADER_GUARD}

#include <jni.h>

#include "base/android/jni_generator/jni_generator_helper.h"
#include "base/android/jni_int_wrapper.h"
#include "base/stl_util.h"  // For base::size().

// Step 1: Forward declarations (classes).
${CLASS_PATH_DECLARATIONS}

// Step 2: Forward declarations (methods).

${FORWARD_DECLARATIONS}

// Step 3: Method declarations.

${JNI_NATIVE_METHOD_ARRAY}\
${PROXY_NATIVE_METHOD_ARRAY}\

${JNI_NATIVE_METHOD}
// Step 4: Main dex and non-main dex registration functions.

namespace ${NAMESPACE} {

bool RegisterMainDexNatives(JNIEnv* env) {\
${REGISTER_MAIN_DEX_PROXY_NATIVES}
${REGISTER_MAIN_DEX_NATIVES}
  return true;
}

bool RegisterNonMainDexNatives(JNIEnv* env) {\
${REGISTER_PROXY_NATIVES}
${REGISTER_NON_MAIN_DEX_NATIVES}
  return true;
}

}  // namespace ${NAMESPACE}

#endif  // ${HEADER_GUARD}
""")
  # Populate the proxy sections before substitution.
  _SetProxyRegistrationFields(registration_dict, use_hash)

  # No forward declarations means no natives anywhere; emit nothing.
  if len(registration_dict['FORWARD_DECLARATIONS']) == 0:
    return ''

  return template.substitute(registration_dict)
class HeaderGenerator(object):
  """Generates an inline header file for JNI registration.

  One instance handles a single Java class; Generate() returns a dict of
  named sections that _Generate() later merges across all classes.
  """

  def __init__(self, namespace, fully_qualified_class, natives, jni_params,
               main_dex, use_proxy_hash):
    self.namespace = namespace
    self.natives = natives
    # Proxy natives (declared via @NativeMethods interfaces) are registered
    # on the GEN_JNI class; classic natives on their declaring class.
    self.proxy_natives = [n for n in natives if n.is_proxy]
    self.non_proxy_natives = [n for n in natives if not n.is_proxy]
    self.fully_qualified_class = fully_qualified_class
    self.jni_params = jni_params
    self.class_name = self.fully_qualified_class.split('/')[-1]
    self.main_dex = main_dex
    self.helper = jni_generator.HeaderFileGeneratorHelper(
        self.class_name, fully_qualified_class, use_proxy_hash)
    self.use_proxy_hash = use_proxy_hash
    self.registration_dict = None

  def Generate(self):
    """Builds and returns the per-class registration dict."""
    self.registration_dict = {'FULL_CLASS_NAME': self.fully_qualified_class}
    self._AddClassPathDeclarations()
    self._AddForwardDeclaration()
    self._AddJNINativeMethodsArrays()
    self._AddProxySignatures()
    self._AddProxyNativeMethodKStrings()
    self._AddRegisterNativesCalls()
    self._AddRegisterNativesFunctions()
    return self.registration_dict

  def _SetDictValue(self, key, value):
    # All stored sections are line-wrapped consistently.
    self.registration_dict[key] = jni_generator.WrapOutput(value)

  def _AddClassPathDeclarations(self):
    """Adds forward declarations for every Java class referenced."""
    classes = self.helper.GetUniqueClasses(self.natives)
    self._SetDictValue(
        'CLASS_PATH_DECLARATIONS',
        self.helper.GetClassPathLines(classes, declare_only=True))

  def _AddForwardDeclaration(self):
    """Add the content of the forward declaration to the dictionary."""
    template = string.Template("""\
JNI_GENERATOR_EXPORT ${RETURN} ${STUB_NAME}(
    JNIEnv* env,
    ${PARAMS_IN_STUB});
""")
    forward_declaration = ''
    for native in self.natives:
      value = {
          'RETURN': jni_generator.JavaDataTypeToC(native.return_type),
          'STUB_NAME': self.helper.GetStubName(native),
          'PARAMS_IN_STUB': jni_generator.GetParamsInStub(native),
      }
      forward_declaration += template.substitute(value)
    self._SetDictValue('FORWARD_DECLARATIONS', forward_declaration)

  def _AddRegisterNativesCalls(self):
    """Add the body of the RegisterNativesImpl method to the dictionary."""
    # Only register if there is at least 1 non-proxy native
    if len(self.non_proxy_natives) == 0:
      return ''
    template = string.Template("""\
  if (!${REGISTER_NAME}(env))
    return false;
""")
    value = {
        'REGISTER_NAME':
        jni_generator.GetRegistrationFunctionName(self.fully_qualified_class)
    }
    register_body = template.substitute(value)
    # The call lands in whichever dex this class belongs to.
    if self.main_dex:
      self._SetDictValue('REGISTER_MAIN_DEX_NATIVES', register_body)
    else:
      self._SetDictValue('REGISTER_NON_MAIN_DEX_NATIVES', register_body)

  def _AddJNINativeMethodsArrays(self):
    """Returns the implementation of the array of native methods."""
    template = string.Template("""\
static const JNINativeMethod kMethods_${JAVA_CLASS}[] = {
${KMETHODS}
};

""")
    open_namespace = ''
    close_namespace = ''
    if self.namespace:
      parts = self.namespace.split('::')
      all_namespaces = ['namespace %s {' % ns for ns in parts]
      open_namespace = '\n'.join(all_namespaces) + '\n'
      all_namespaces = ['}  // namespace %s' % ns for ns in parts]
      all_namespaces.reverse()
      close_namespace = '\n'.join(all_namespaces) + '\n\n'

    body = self._SubstituteNativeMethods(template)
    self._SetDictValue('JNI_NATIVE_METHOD_ARRAY', ''.join((open_namespace, body,
                                                           close_namespace)))

  def _GetKMethodsString(self, clazz):
    """kMethods entries for all non-proxy natives declared on 'clazz'."""
    ret = []
    for native in self.non_proxy_natives:
      # A native with no explicit inner-class name belongs to the outer class.
      if (native.java_class_name == clazz
          or (not native.java_class_name and clazz == self.class_name)):
        ret += [self._GetKMethodArrayEntry(native)]
    return '\n'.join(ret)

  def _GetKMethodArrayEntry(self, native):
    """One JNINativeMethod initializer line for 'native'."""
    template = string.Template('    { "${NAME}", ${JNI_SIGNATURE}, ' +
                               'reinterpret_cast<void*>(${STUB_NAME}) },')

    name = 'native' + native.name
    if native.is_proxy:
      # Literal name of the native method in the class that contains the actual
      # native declaration.
      name = native.proxy_name
    values = {
        'NAME':
        name,
        'JNI_SIGNATURE':
        self.jni_params.Signature(native.params, native.return_type),
        'STUB_NAME':
        self.helper.GetStubName(native)
    }
    return template.substitute(values)

  def _AddProxySignatures(self):
    # Java declarations for GEN_JNI; not line-wrapped like other sections.
    self.registration_dict['PROXY_NATIVE_SIGNATURES'] = ('\n'.join(
        _MakeProxySignature(n) for n in self.proxy_natives))

  def _AddProxyNativeMethodKStrings(self):
    """Returns KMethodString for wrapped native methods in all_classes """
    if self.main_dex:
      key = 'PROXY_NATIVE_METHOD_ARRAY_MAIN_DEX'
    else:
      key = 'PROXY_NATIVE_METHOD_ARRAY'
    proxy_k_strings = ('\n'.join(
        self._GetKMethodArrayEntry(p) for p in self.proxy_natives))
    self._SetDictValue(key, proxy_k_strings)

  def _SubstituteNativeMethods(self, template, sub_proxy=False):
    """Substitutes NAMESPACE, JAVA_CLASS and KMETHODS in the provided
    template."""
    ret = []
    all_classes = self.helper.GetUniqueClasses(self.natives)
    all_classes[self.class_name] = self.fully_qualified_class
    for clazz, full_clazz in all_classes.items():
      if not sub_proxy:
        # Proxy natives are emitted separately on the GEN_JNI class.
        if clazz == jni_generator.ProxyHelpers.GetClass(self.use_proxy_hash):
          continue
      kmethods = self._GetKMethodsString(clazz)
      namespace_str = ''
      if self.namespace:
        namespace_str = self.namespace + '::'
      if kmethods:
        values = {
            'NAMESPACE': namespace_str,
            'JAVA_CLASS': jni_generator.EscapeClassName(full_clazz),
            'KMETHODS': kmethods
        }
        ret += [template.substitute(values)]
    if not ret: return ''
    return '\n'.join(ret)

  def GetJNINativeMethodsString(self):
    """Returns the implementation of the array of native methods."""
    template = string.Template("""\
static const JNINativeMethod kMethods_${JAVA_CLASS}[] = {
${KMETHODS}

};
""")
    return self._SubstituteNativeMethods(template)

  def _AddRegisterNativesFunctions(self):
    """Returns the code for RegisterNatives."""
    natives = self._GetRegisterNativesImplString()
    if not natives:
      return ''
    template = string.Template("""\
JNI_REGISTRATION_EXPORT bool ${REGISTER_NAME}(JNIEnv* env) {
${NATIVES}\
  return true;
}

""")
    values = {
        'REGISTER_NAME':
        jni_generator.GetRegistrationFunctionName(self.fully_qualified_class),
        'NATIVES':
        natives
    }
    self._SetDictValue('JNI_NATIVE_METHOD', template.substitute(values))

  def _GetRegisterNativesImplString(self):
    """Returns the shared implementation for RegisterNatives."""
    template = string.Template("""\
  const int kMethods_${JAVA_CLASS}Size =
      base::size(${NAMESPACE}kMethods_${JAVA_CLASS});
  if (env->RegisterNatives(
      ${JAVA_CLASS}_clazz(env),
      ${NAMESPACE}kMethods_${JAVA_CLASS},
      kMethods_${JAVA_CLASS}Size) < 0) {
    jni_generator::HandleRegistrationError(env,
                                           ${JAVA_CLASS}_clazz(env),
                                           __FILE__);
    return false;
  }

""")
    # Only register if there is a native method not in a proxy,
    # since all the proxies will be registered together.
    if len(self.non_proxy_natives) != 0:
      return self._SubstituteNativeMethods(template)
    return ''
def _MakeProxySignature(proxy_native):
  """Returns the Java declaration line for one proxy native method."""
  params = jni_generator.JniParams.MakeProxyParamSignature(proxy_native.params)
  signature_template = string.Template("""
    public static native ${RETURN_TYPE} ${NAME}(${PARAMS});""")
  return signature_template.substitute({
      'RETURN_TYPE': proxy_native.return_type,
      'NAME': proxy_native.proxy_name,
      'PARAMS': params,
  })
class ProxyOptions:
  """Bundles the flags that control proxy-native generation."""

  def __init__(self, **kwargs):
    opt = kwargs.get
    self.use_hash = opt('use_hash', False)
    self.enable_mocks = opt('enable_mocks', False)
    self.require_mocks = opt('require_mocks', False)
    # Requiring mocks while they are disabled is contradictory.
    assert self.enable_mocks or not self.require_mocks
def main(argv):
  """Parses arguments, collects Java sources, and runs generation.

  Returns None on success; argparse exits on invalid flag combinations.
  """
  arg_parser = argparse.ArgumentParser()
  build_utils.AddDepfileOption(arg_parser)
  arg_parser.add_argument(
      '--sources-files',
      required=True,
      action='append',
      help='A list of .sources files which contain Java '
      'file paths.')
  arg_parser.add_argument(
      '--header-path', help='Path to output header file (optional).')
  arg_parser.add_argument(
      '--srcjar-path',
      required=True,
      help='Path to output srcjar for GEN_JNI.java (Or J/N.java if proxy'
      ' hash is enabled).')
  arg_parser.add_argument(
      '--sources-blacklist',
      default=[],
      help='A list of Java files which should be ignored '
      'by the parser.')
  arg_parser.add_argument(
      '--namespace',
      default='',
      help='Namespace to wrap the registration functions '
      'into.')
  # TODO(crbug.com/898261) hook these flags up to the build config to enable
  # mocking in instrumentation tests
  arg_parser.add_argument(
      '--enable_proxy_mocks',
      default=False,
      action='store_true',
      help='Allows proxy native impls to be mocked through Java.')
  arg_parser.add_argument(
      '--require_mocks',
      default=False,
      action='store_true',
      help='Requires all used native implementations to have a mock set when '
      'called. Otherwise an exception will be thrown.')
  arg_parser.add_argument(
      '--use_proxy_hash',
      action='store_true',
      help='Enables hashing of the native declaration for methods in '
      'an @JniNatives interface')
  args = arg_parser.parse_args(build_utils.ExpandFileArgs(argv[1:]))

  # Mirror the ProxyOptions invariant with a friendlier error message.
  if not args.enable_proxy_mocks and args.require_mocks:
    arg_parser.error(
        'Invalid arguments: --require_mocks without --enable_proxy_mocks. '
        'Cannot require mocks if they are not enabled.')

  sources_files = sorted(set(build_utils.ParseGnList(args.sources_files)))
  proxy_opts = ProxyOptions(
      use_hash=args.use_proxy_hash,
      require_mocks=args.require_mocks,
      enable_mocks=args.enable_proxy_mocks)

  java_file_paths = []
  for f in sources_files:
    # Skip generated files, since the GN targets do not declare any deps.
    java_file_paths.extend(
        p for p in build_utils.ReadSourcesList(f)
        if p.startswith('..') and p not in args.sources_blacklist)
  _Generate(
      java_file_paths,
      args.srcjar_path,
      proxy_opts=proxy_opts,
      header_path=args.header_path,
      namespace=args.namespace)

  if args.depfile:
    build_utils.WriteDepfile(
        args.depfile,
        args.srcjar_path,
        sources_files + java_file_paths,
        add_pydeps=False)
if __name__ == '__main__':
  # Flags start at argv[1]; propagate main()'s result as the exit status.
  sys.exit(main(sys.argv))
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package bufio
import (
"bytes"
"errors"
"io"
"unicode/utf8"
)
// Scanner provides a convenient interface for reading data such as
// a file of newline-delimited lines of text. Successive calls to
// the [Scanner.Scan] method will step through the 'tokens' of a file, skipping
// the bytes between the tokens. The specification of a token is
// defined by a split function of type [SplitFunc]; the default split
// function breaks the input into lines with line termination stripped. [Scanner.Split]
// functions are defined in this package for scanning a file into
// lines, bytes, UTF-8-encoded runes, and space-delimited words. The
// client may instead provide a custom split function.
//
// Scanning stops unrecoverably at EOF, the first I/O error, or a token too
// large to fit in the [Scanner.Buffer]. When a scan stops, the reader may have
// advanced arbitrarily far past the last token. Programs that need more
// control over error handling or large tokens, or must run sequential scans
// on a reader, should use [bufio.Reader] instead.
type Scanner struct {
	r            io.Reader // The reader provided by the client.
	split        SplitFunc // The function to split the tokens.
	maxTokenSize int       // Maximum size of a token; modified by tests.
	token        []byte    // Last token returned by split.
	buf          []byte    // Buffer used as argument to split.
	start        int       // First non-processed byte in buf.
	end          int       // End of data in buf.
	err          error     // Sticky error.
	empties      int       // Count of successive empty tokens; guards against non-advancing splits.
	scanCalled   bool      // Scan has been called; buffer is in use.
	done         bool      // Scan has finished.
}
// SplitFunc is the signature of the split function used to tokenize the
// input. The arguments are an initial substring of the remaining unprocessed
// data and a flag, atEOF, that reports whether the [Reader] has no more data
// to give. The return values are the number of bytes to advance the input
// and the next token to return to the user, if any, plus an error, if any.
//
// Scanning stops if the function returns an error, in which case some of
// the input may be discarded. If that error is [ErrFinalToken], scanning
// stops with no error. A non-nil token delivered with [ErrFinalToken]
// will be the last token, and a nil token with [ErrFinalToken]
// immediately stops the scanning.
//
// Otherwise, the [Scanner] advances the input. If the token is not nil,
// the [Scanner] returns it to the user. If the token is nil, the
// Scanner reads more data and continues scanning; if there is no more
// data--if atEOF was true--the [Scanner] returns. If the data does not
// yet hold a complete token, for instance if it has no newline while
// scanning lines, a [SplitFunc] can return (0, nil, nil) to signal the
// [Scanner] to read more data into the slice and try again with a
// longer slice starting at the same point in the input.
//
// The function is never called with an empty data slice unless atEOF
// is true. If atEOF is true, however, data may be non-empty and,
// as always, holds unprocessed text. In short, (0, nil, nil) always
// means "read more data before trying again".
type SplitFunc func(data []byte, atEOF bool) (advance int, token []byte, err error)
// Errors returned by Scanner.
var (
	// ErrTooLong is reported when a token exceeds the buffer's maximum size.
	ErrTooLong = errors.New("bufio.Scanner: token too long")
	// ErrNegativeAdvance is reported when a SplitFunc returns a negative advance.
	ErrNegativeAdvance = errors.New("bufio.Scanner: SplitFunc returns negative advance count")
	// ErrAdvanceTooFar is reported when a SplitFunc advances past the end of input.
	ErrAdvanceTooFar = errors.New("bufio.Scanner: SplitFunc returns advance count beyond input")
	// ErrBadReadCount is reported when the underlying Reader returns an impossible count.
	ErrBadReadCount = errors.New("bufio.Scanner: Read returned impossible count")
)
const (
	// MaxScanTokenSize is the maximum size used to buffer a token
	// unless the user provides an explicit buffer with [Scanner.Buffer].
	// The actual maximum token size may be smaller as the buffer
	// may need to include, for instance, a newline.
	MaxScanTokenSize = 64 * 1024

	// startBufSize is the size of the initial buffer allocation; the buffer
	// doubles from here (capped at maxTokenSize) as larger tokens appear.
	startBufSize = 4096 // Size of initial allocation for buffer.
)
// NewScanner returns a new [Scanner] reading from r, configured with
// the default line-splitting function ([ScanLines]) and the default
// maximum token size ([MaxScanTokenSize]).
func NewScanner(r io.Reader) *Scanner {
	s := new(Scanner)
	s.r = r
	s.split = ScanLines
	s.maxTokenSize = MaxScanTokenSize
	return s
}
// Err returns the first non-EOF error that was encountered by the [Scanner].
// A plain end of input is not reported as an error.
func (s *Scanner) Err() error {
	if err := s.err; err != io.EOF {
		return err
	}
	return nil
}
// Bytes returns the most recent token generated by a call to [Scanner.Scan].
// The underlying array may point to data that will be overwritten
// by a subsequent call to Scan. It does no allocation; callers that need
// a stable copy should use [Scanner.Text] or copy the slice themselves.
func (s *Scanner) Bytes() []byte {
	return s.token
}
// Text returns the most recent token generated by a call to [Scanner.Scan]
// as a newly allocated string holding its bytes. Unlike [Scanner.Bytes],
// the result remains valid across subsequent Scan calls.
func (s *Scanner) Text() string {
	return string(s.token)
}
// ErrFinalToken is a special sentinel error value. It is intended to be
// returned by a Split function to indicate that the scanning should stop
// with no error. If the token being delivered with this error is not nil,
// the token is the last token.
//
// The value is useful to stop processing early or when it is necessary to
// deliver a final empty token (which is different from a nil token).
// One could achieve the same behavior with a custom error value but
// providing one here is tidier.
// See the emptyFinalToken example for a use of this value.
// Callers of [Scanner.Scan] observe this as a normal end of input:
// Scan marks itself done without recording an error.
var ErrFinalToken = errors.New("final token")
// Scan advances the [Scanner] to the next token, which will then be
// available through the [Scanner.Bytes] or [Scanner.Text] method. It returns false when
// there are no more tokens, either by reaching the end of the input or an error.
// After Scan returns false, the [Scanner.Err] method will return any error that
// occurred during scanning, except that if it was [io.EOF], [Scanner.Err]
// will return nil.
// Scan panics if the split function returns too many empty
// tokens without advancing the input. This is a common error mode for
// scanners.
func (s *Scanner) Scan() bool {
	if s.done {
		return false
	}
	s.scanCalled = true
	// Loop until we have a token.
	for {
		// See if we can get a token with what we already have.
		// If we've run out of data but have an error, give the split function
		// a chance to recover any remaining, possibly empty token.
		if s.end > s.start || s.err != nil {
			advance, token, err := s.split(s.buf[s.start:s.end], s.err != nil)
			if err != nil {
				if err == ErrFinalToken {
					s.token = token
					s.done = true
					// When token is not nil, it means the scanning stops
					// with a trailing token, and thus the return value
					// should be true to indicate the existence of the token.
					return token != nil
				}
				s.setErr(err)
				return false
			}
			if !s.advance(advance) {
				return false
			}
			s.token = token
			if token != nil {
				if s.err == nil || advance > 0 {
					s.empties = 0
				} else {
					// Returning tokens not advancing input at EOF.
					s.empties++
					if s.empties > maxConsecutiveEmptyReads {
						panic("bufio.Scan: too many empty tokens without progressing")
					}
				}
				return true
			}
		}
		// We cannot generate a token with what we are holding.
		// If we've already hit EOF or an I/O error, we are done.
		if s.err != nil {
			// Shut it down.
			s.start = 0
			s.end = 0
			return false
		}
		// Must read more data.
		// First, shift data to beginning of buffer if there's lots of empty space
		// or space is needed.
		if s.start > 0 && (s.end == len(s.buf) || s.start > len(s.buf)/2) {
			copy(s.buf, s.buf[s.start:s.end])
			s.end -= s.start
			s.start = 0
		}
		// Is the buffer full? If so, resize.
		if s.end == len(s.buf) {
			// Guarantee no overflow in the multiplication below.
			const maxInt = int(^uint(0) >> 1)
			if len(s.buf) >= s.maxTokenSize || len(s.buf) > maxInt/2 {
				s.setErr(ErrTooLong)
				return false
			}
			newSize := len(s.buf) * 2
			if newSize == 0 {
				newSize = startBufSize
			}
			// Never grow past the configured maximum token size.
			newSize = min(newSize, s.maxTokenSize)
			newBuf := make([]byte, newSize)
			copy(newBuf, s.buf[s.start:s.end])
			s.buf = newBuf
			s.end -= s.start
			s.start = 0
		}
		// Finally we can read some input. Make sure we don't get stuck with
		// a misbehaving Reader. Officially we don't need to do this, but let's
		// be extra careful: Scanner is for safe, simple jobs.
		for loop := 0; ; {
			n, err := s.r.Read(s.buf[s.end:len(s.buf)])
			if n < 0 || len(s.buf)-s.end < n {
				s.setErr(ErrBadReadCount)
				break
			}
			s.end += n
			if err != nil {
				s.setErr(err)
				break
			}
			if n > 0 {
				s.empties = 0
				break
			}
			loop++
			if loop > maxConsecutiveEmptyReads {
				s.setErr(io.ErrNoProgress)
				break
			}
		}
	}
}
// advance consumes n bytes of the buffer, reporting whether the
// requested advance was legal. An illegal advance records a sticky
// error via setErr and leaves start untouched.
func (s *Scanner) advance(n int) bool {
	switch {
	case n < 0:
		s.setErr(ErrNegativeAdvance)
		return false
	case n > s.end-s.start:
		s.setErr(ErrAdvanceTooFar)
		return false
	}
	s.start += n
	return true
}
// setErr records the first error encountered; later errors are ignored,
// except that io.EOF may be overwritten by a real error.
func (s *Scanner) setErr(err error) {
	if s.err != nil && s.err != io.EOF {
		return
	}
	s.err = err
}
// Buffer controls memory allocation by the Scanner.
// It sets the initial buffer to use when scanning
// and the maximum size of buffer that may be allocated during scanning.
// The contents of the buffer are ignored.
//
// The maximum token size must be less than the larger of max and cap(buf).
// If max <= cap(buf), [Scanner.Scan] will use this buffer only and do no allocation.
//
// By default, [Scanner.Scan] uses an internal buffer and sets the
// maximum token size to [MaxScanTokenSize].
//
// Buffer panics if it is called after scanning has started.
func (s *Scanner) Buffer(buf []byte, max int) {
	if s.scanCalled {
		panic("Buffer called after Scan")
	}
	// Expand the slice to its full capacity; length is irrelevant since
	// contents are ignored.
	s.buf = buf[0:cap(buf)]
	s.maxTokenSize = max
}
// Split sets the split function for the [Scanner].
// The default split function is [ScanLines].
//
// Split panics if it is called after scanning has started, because the
// buffer may already hold partially-tokenized data.
func (s *Scanner) Split(split SplitFunc) {
	if s.scanCalled {
		panic("Split called after Scan")
	}
	s.split = split
}
// Split functions
// ScanBytes is a split function for a [Scanner] that delivers the input
// one byte at a time, each byte as its own token.
func ScanBytes(data []byte, atEOF bool) (advance int, token []byte, err error) {
	if atEOF && len(data) == 0 {
		// Nothing left to deliver.
		return 0, nil, nil
	}
	return 1, data[:1], nil
}
var errorRune = []byte(string(utf8.RuneError))
// ScanRunes is a split function for a [Scanner] that returns each
// UTF-8-encoded rune as a token. The sequence of runes returned is
// equivalent to that from a range loop over the input as a string, which
// means that erroneous UTF-8 encodings translate to U+FFFD = "\xef\xbf\xbd".
// Because of the Scan interface, this makes it impossible for the client to
// distinguish correctly encoded replacement runes from encoding errors.
func ScanRunes(data []byte, atEOF bool) (advance int, token []byte, err error) {
	if atEOF && len(data) == 0 {
		return 0, nil, nil
	}
	// Fast path 1: ASCII.
	if data[0] < utf8.RuneSelf {
		return 1, data[0:1], nil
	}
	// Fast path 2: Correct UTF-8 decode without error.
	_, width := utf8.DecodeRune(data)
	if width > 1 {
		// It's a valid encoding. Width cannot be one for a correctly encoded
		// non-ASCII rune.
		return width, data[0:width], nil
	}
	// We know it's an error: we have width==1 and implicitly r==utf8.RuneError.
	// Is the error because there wasn't a full rune to be decoded?
	// FullRune distinguishes correctly between erroneous and incomplete encodings.
	if !atEOF && !utf8.FullRune(data) {
		// Incomplete; get more bytes.
		return 0, nil, nil
	}
	// We have a real UTF-8 encoding error. Return a properly encoded error rune
	// but advance only one byte. This matches the behavior of a range loop over
	// an incorrectly encoded string.
	return 1, errorRune, nil
}
// dropCR returns data with a single trailing '\r' removed, if present;
// otherwise it returns data unchanged.
func dropCR(data []byte) []byte {
	if n := len(data); n > 0 && data[n-1] == '\r' {
		return data[:n-1]
	}
	return data
}
// ScanLines is a split function for a [Scanner] that returns each line of
// text, stripped of any trailing end-of-line marker. The returned line may
// be empty. The end-of-line marker is one optional carriage return followed
// by one mandatory newline. In regular expression notation, it is `\r?\n`.
// The last non-empty line of input will be returned even if it has no
// newline.
func ScanLines(data []byte, atEOF bool) (advance int, token []byte, err error) {
	switch i := bytes.IndexByte(data, '\n'); {
	case atEOF && len(data) == 0:
		// Input exhausted.
		return 0, nil, nil
	case i >= 0:
		// Full newline-terminated line; consume the '\n' but drop it
		// (and any preceding '\r') from the token.
		return i + 1, dropCR(data[:i]), nil
	case atEOF:
		// Final, non-terminated line.
		return len(data), dropCR(data), nil
	}
	// Request more data.
	return 0, nil, nil
}
// isSpace reports whether r is a Unicode white space character.
// The set is hard-coded so this package need not depend on the unicode
// package; the tests verify it against unicode.IsSpace.
func isSpace(r rune) bool {
	switch r {
	// ASCII and Latin-1: \t..\r, space, NEL, NBSP.
	case ' ', '\t', '\n', '\v', '\f', '\r', '\u0085', '\u00A0',
		// High-valued singletons.
		'\u1680', '\u2028', '\u2029', '\u202f', '\u205f', '\u3000':
		return true
	}
	// The contiguous general-punctuation space block.
	return '\u2000' <= r && r <= '\u200a'
}
// ScanWords is a split function for a [Scanner] that returns each
// space-separated word of text, with surrounding spaces deleted. It will
// never return an empty string. The definition of space is set by
// unicode.IsSpace.
func ScanWords(data []byte, atEOF bool) (advance int, token []byte, err error) {
// Skip leading spaces.
start := 0
for width := 0; start < len(data); start += width {
var r rune
r, width = utf8.DecodeRune(data[start:])
if !isSpace(r) {
break
}
}
// Scan until space, marking end of word.
for width, i := 0, start; i < len(data); i += width {
var r rune
r, width = utf8.DecodeRune(data[i:])
if isSpace(r) {
return i + width, data[start:i], nil
}
}
// If we're at EOF, we have a final, non-empty, non-terminated word. Return it.
if atEOF && len(data) > start {
return len(data), data[start:], nil
}
// Request more data.
return start, nil, nil
} | go | github | https://github.com/golang/go | src/bufio/scan.go |
import {Routes} from '@angular/router';
import {Home} from './home/home';
import {User} from './user/user';
// Top-level route table for the application. Each entry maps a URL path to
// the component rendered there and sets the document title via the Router's
// built-in `title` property.
export const routes: Routes = [
  {
    path: '', // default route: matches the application root URL
    title: 'App Home Page',
    component: Home,
  },
  {
    path: 'user',
    title: 'App User Page',
    component: User,
  },
];
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (c) 2017 Citrix Systems
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
# Ansible module metadata, consumed by ansible-doc and the Ansible sanity
# tooling to classify this module's support status.
ANSIBLE_METADATA = {'metadata_version': '1.1',
                    'status': ['preview'],
                    'supported_by': 'community'}
DOCUMENTATION = '''
---
module: netscaler_servicegroup
short_description: Manage service group configuration in Netscaler
description:
- Manage service group configuration in Netscaler.
- This module is intended to run either on the ansible control node or a bastion (jumpserver) with access to the actual netscaler instance.
version_added: "2.4"
author: George Nikolopoulos (@giorgos-nikolopoulos)
options:
servicegroupname:
description:
- >-
Name of the service group. Must begin with an ASCII alphabetic or underscore C(_) character, and must
contain only ASCII alphanumeric, underscore C(_), hash C(#), period C(.), space C( ), colon C(:), at C(@), equals
C(=), and hyphen C(-) characters. Can be changed after the name is created.
- "Minimum length = 1"
servicetype:
choices:
- 'HTTP'
- 'FTP'
- 'TCP'
- 'UDP'
- 'SSL'
- 'SSL_BRIDGE'
- 'SSL_TCP'
- 'DTLS'
- 'NNTP'
- 'RPCSVR'
- 'DNS'
- 'ADNS'
- 'SNMP'
- 'RTSP'
- 'DHCPRA'
- 'ANY'
- 'SIP_UDP'
- 'SIP_TCP'
- 'SIP_SSL'
- 'DNS_TCP'
- 'ADNS_TCP'
- 'MYSQL'
- 'MSSQL'
- 'ORACLE'
- 'RADIUS'
- 'RADIUSListener'
- 'RDP'
- 'DIAMETER'
- 'SSL_DIAMETER'
- 'TFTP'
- 'SMPP'
- 'PPTP'
- 'GRE'
- 'SYSLOGTCP'
- 'SYSLOGUDP'
- 'FIX'
- 'SSL_FIX'
description:
- "Protocol used to exchange data with the service."
cachetype:
choices:
- 'TRANSPARENT'
- 'REVERSE'
- 'FORWARD'
description:
- "Cache type supported by the cache server."
maxclient:
description:
- "Maximum number of simultaneous open connections for the service group."
- "Minimum value = C(0)"
- "Maximum value = C(4294967294)"
maxreq:
description:
- "Maximum number of requests that can be sent on a persistent connection to the service group."
- "Note: Connection requests beyond this value are rejected."
- "Minimum value = C(0)"
- "Maximum value = C(65535)"
cacheable:
description:
- "Use the transparent cache redirection virtual server to forward the request to the cache server."
- "Note: Do not set this parameter if you set the Cache Type."
type: bool
cip:
choices:
- 'enabled'
- 'disabled'
description:
- "Insert the Client IP header in requests forwarded to the service."
cipheader:
description:
- >-
Name of the HTTP header whose value must be set to the IP address of the client. Used with the Client
IP parameter. If client IP insertion is enabled, and the client IP header is not specified, the value
of Client IP Header parameter or the value set by the set ns config command is used as client's IP
header name.
- "Minimum length = 1"
usip:
description:
- >-
Use client's IP address as the source IP address when initiating connection to the server. With the
NO setting, which is the default, a mapped IP (MIP) address or subnet IP (SNIP) address is used as
the source IP address to initiate server side connections.
pathmonitor:
description:
- "Path monitoring for clustering."
pathmonitorindv:
description:
- "Individual Path monitoring decisions."
useproxyport:
description:
- >-
Use the proxy port as the source port when initiating connections with the server. With the NO
setting, the client-side connection port is used as the source port for the server-side connection.
- "Note: This parameter is available only when the Use Source IP C(usip) parameter is set to C(yes)."
type: bool
healthmonitor:
description:
- "Monitor the health of this service. Available settings function as follows:"
- "C(yes) - Send probes to check the health of the service."
- >-
C(no) - Do not send probes to check the health of the service. With the NO option, the appliance shows
the service as UP at all times.
type: bool
sp:
description:
- "Enable surge protection for the service group."
type: bool
rtspsessionidremap:
description:
- "Enable RTSP session ID mapping for the service group."
type: bool
clttimeout:
description:
- "Time, in seconds, after which to terminate an idle client connection."
- "Minimum value = C(0)"
- "Maximum value = C(31536000)"
svrtimeout:
description:
- "Time, in seconds, after which to terminate an idle server connection."
- "Minimum value = C(0)"
- "Maximum value = C(31536000)"
cka:
description:
- "Enable client keep-alive for the service group."
type: bool
tcpb:
description:
- "Enable TCP buffering for the service group."
type: bool
cmp:
description:
- "Enable compression for the specified service."
type: bool
maxbandwidth:
description:
- "Maximum bandwidth, in Kbps, allocated for all the services in the service group."
- "Minimum value = C(0)"
- "Maximum value = C(4294967287)"
monthreshold:
description:
- >-
Minimum sum of weights of the monitors that are bound to this service. Used to determine whether to
mark a service as UP or DOWN.
- "Minimum value = C(0)"
- "Maximum value = C(65535)"
downstateflush:
choices:
- 'enabled'
- 'disabled'
description:
- >-
Flush all active transactions associated with all the services in the service group whose state
transitions from UP to DOWN. Do not enable this option for applications that must complete their
transactions.
tcpprofilename:
description:
- "Name of the TCP profile that contains TCP configuration settings for the service group."
- "Minimum length = 1"
- "Maximum length = 127"
httpprofilename:
description:
- "Name of the HTTP profile that contains HTTP configuration settings for the service group."
- "Minimum length = 1"
- "Maximum length = 127"
comment:
description:
- "Any information about the service group."
appflowlog:
choices:
- 'enabled'
- 'disabled'
description:
- "Enable logging of AppFlow information for the specified service group."
netprofile:
description:
- "Network profile for the service group."
- "Minimum length = 1"
- "Maximum length = 127"
autoscale:
choices:
- 'DISABLED'
- 'DNS'
- 'POLICY'
description:
- "Auto scale option for a servicegroup."
memberport:
description:
- "member port."
graceful:
description:
- "Wait for all existing connections to the service to terminate before shutting down the service."
type: bool
servicemembers:
description:
- A list of dictionaries describing each service member of the service group.
suboptions:
ip:
description:
- IP address of the service. Must not overlap with an existing server entity defined by name.
port:
description:
- Server port number.
- Range C(1) - C(65535)
- "* in CLI is represented as 65535 in NITRO API"
state:
choices:
- 'enabled'
- 'disabled'
description:
- Initial state of the service after binding.
hashid:
description:
- The hash identifier for the service.
- This must be unique for each service.
- This parameter is used by hash based load balancing methods.
- Minimum value = C(1)
serverid:
description:
- The identifier for the service.
- This is used when the persistency type is set to Custom Server ID.
servername:
description:
- Name of the server to which to bind the service group.
- The server must already be configured as a named server.
- Minimum length = 1
customserverid:
description:
- The identifier for this IP:Port pair.
- Used when the persistency type is set to Custom Server ID.
weight:
description:
- Weight to assign to the servers in the service group.
- Specifies the capacity of the servers relative to the other servers in the load balancing configuration.
- The higher the weight, the higher the percentage of requests sent to the service.
- Minimum value = C(1)
- Maximum value = C(100)
monitorbindings:
description:
- A list of monitornames to bind to this service
- Note that the monitors must have already been setup possibly using the M(netscaler_lb_monitor) module or some other method
suboptions:
monitorname:
description:
- The monitor name to bind to this servicegroup.
weight:
description:
- Weight to assign to the binding between the monitor and servicegroup.
disabled:
description:
- When set to C(yes) the service group state will be set to DISABLED.
- When set to C(no) the service group state will be set to ENABLED.
- >-
Note that due to limitations of the underlying NITRO API a C(disabled) state change alone
does not cause the module result to report a changed status.
type: bool
default: false
extends_documentation_fragment: netscaler
requirements:
- nitro python sdk
'''
EXAMPLES = '''
# The LB Monitors monitor-1 and monitor-2 must already exist
# Service members defined by C(ip) must not redefine an existing server's ip address.
# Service members defined by C(servername) must already exist.
- name: Setup http service with ip members
delegate_to: localhost
netscaler_servicegroup:
nsip: 172.18.0.2
nitro_user: nsroot
nitro_pass: nsroot
state: present
servicegroupname: service-group-1
servicetype: HTTP
servicemembers:
- ip: 10.78.78.78
port: 80
weight: 50
- ip: 10.79.79.79
port: 80
weight: 40
- servername: server-1
port: 80
weight: 10
monitorbindings:
- monitorname: monitor-1
weight: 50
- monitorname: monitor-2
weight: 50
'''
RETURN = '''
loglines:
description: list of logged messages by the module
returned: always
type: list
sample: ['message 1', 'message 2']
msg:
description: Message detailing the failure reason
returned: failure
type: str
sample: "Action does not exist"
diff:
description: List of differences between the actual configured object and the configuration specified in the module
returned: failure
type: dict
sample: { 'clttimeout': 'difference. ours: (float) 10.0 other: (float) 20.0' }
'''
from ansible.module_utils.basic import AnsibleModule
import copy
from ansible.module_utils.network.netscaler.netscaler import ConfigProxy, get_nitro_client, netscaler_common_arguments, log, \
loglines, get_immutables_intersection
try:
from nssrc.com.citrix.netscaler.nitro.resource.config.basic.servicegroup import servicegroup
from nssrc.com.citrix.netscaler.nitro.resource.config.basic.servicegroup_servicegroupmember_binding import servicegroup_servicegroupmember_binding
from nssrc.com.citrix.netscaler.nitro.exception.nitro_exception import nitro_exception
from nssrc.com.citrix.netscaler.nitro.resource.config.basic.servicegroup_lbmonitor_binding import servicegroup_lbmonitor_binding
from nssrc.com.citrix.netscaler.nitro.resource.config.lb.lbmonitor_servicegroup_binding import lbmonitor_servicegroup_binding
PYTHON_SDK_IMPORTED = True
except ImportError as e:
PYTHON_SDK_IMPORTED = False
def servicegroup_exists(client, module):
    """Return True if a service group with the configured name exists on the appliance."""
    log('Checking if service group exists')
    count = servicegroup.count_filtered(client, 'servicegroupname:%s' % module.params['servicegroupname'])
    log('count is %s' % count)
    return count > 0
def servicegroup_identical(client, module, servicegroup_proxy):
    """Return True if the service group on the appliance matches the configured proxy."""
    log('Checking if service group is identical')
    filter_expr = 'servicegroupname:%s' % module.params['servicegroupname']
    remote_groups = servicegroup.get_filtered(client, filter_expr)
    return servicegroup_proxy.has_equal_attributes(remote_groups[0])
def get_configured_service_members(client, module):
    """Build a ConfigProxy per service member declared in the playbook.

    Returns an empty list when no servicemembers were configured.
    """
    log('get_configured_service_members')
    readwrite_attrs = [
        'servicegroupname',
        'ip',
        'port',
        'state',
        'hashid',
        'serverid',
        'servername',
        'customserverid',
        'weight'
    ]
    readonly_attrs = [
        'delay',
        'statechangetimesec',
        'svrstate',
        'tickssincelaststatechange',
        'graceful',
    ]
    if module.params['servicemembers'] is None:
        return []
    members = []
    for declared in module.params['servicemembers']:
        # Work on a copy so the module parameters stay untouched.
        values = copy.deepcopy(declared)
        values['servicegroupname'] = module.params['servicegroupname']
        members.append(ConfigProxy(
            actual=servicegroup_servicegroupmember_binding(),
            client=client,
            attribute_values_dict=values,
            readwrite_attrs=readwrite_attrs,
            readonly_attrs=readonly_attrs
        ))
    return members
def get_actual_service_members(client, module):
    """Fetch the service member bindings currently present on the appliance.

    NITRO raises errorcode 258 (instead of returning a zero count) when the
    service group has no member bindings; that case yields an empty list.
    """
    group_name = module.params['servicegroupname']
    try:
        # count() raises a nitro exception instead of returning 0
        if servicegroup_servicegroupmember_binding.count(client, group_name) > 0:
            return servicegroup_servicegroupmember_binding.get(client, group_name)
        return []
    except nitro_exception as e:
        if e.errorcode == 258:
            return []
        raise
def servicemembers_identical(client, module):
    """Return True when every actual member has an identical configured member.

    Length equality is checked first, then each appliance-side member must
    match at least one configured member attribute-for-attribute.
    """
    log('servicemembers_identical')
    actual_members = get_actual_service_members(client, module)
    log('servicemembers %s' % actual_members)
    configured_members = get_configured_service_members(client, module)
    log('Number of service group members %s' % len(actual_members))
    if len(actual_members) != len(configured_members):
        return False
    matched = 0
    for actual_member in actual_members:
        if any(member.has_equal_attributes(actual_member) for member in configured_members):
            matched += 1
    return matched == len(actual_members)
def sync_service_members(client, module):
    """Reconcile the appliance's service members with the playbook's list.

    Members identical on both sides are left untouched; remaining actual
    members are deleted first, then remaining configured members are added.
    """
    log('sync_service_members')
    configured_service_members = get_configured_service_members(client, module)
    actual_service_members = get_actual_service_members(client, module)
    skip_add = []
    skip_delete = []
    # Find positions of identical service members
    for (configured_index, configured_service) in enumerate(configured_service_members):
        for (actual_index, actual_service) in enumerate(actual_service_members):
            if configured_service.has_equal_attributes(actual_service):
                skip_add.append(configured_index)
                skip_delete.append(actual_index)
    # Delete actual members that are not identical to any configured member
    for (actual_index, actual_service) in enumerate(actual_service_members):
        # Skip identical
        if actual_index in skip_delete:
            log('Skipping actual delete at index %s' % actual_index)
            continue
        # Fallthrough to deletion.
        # When a binding carries both an ip and a servername, blank the ip so
        # the NITRO delete call identifies the member by servername only.
        # NOTE(review): the list passed to all() is evaluated eagerly, so
        # actual_service.ip is read even when hasattr() is False — presumably
        # NITRO binding objects always expose these attributes; verify
        # against the SDK.
        if all([
            hasattr(actual_service, 'ip'),
            actual_service.ip is not None,
            hasattr(actual_service, 'servername'),
            actual_service.servername is not None,
        ]):
            actual_service.ip = None
        actual_service.servicegroupname = module.params['servicegroupname']
        servicegroup_servicegroupmember_binding.delete(client, actual_service)
    # Add configured members that are not already present on the appliance
    for (configured_index, configured_service) in enumerate(configured_service_members):
        # Skip identical
        if configured_index in skip_add:
            log('Skipping configured add at index %s' % configured_index)
            continue
        # Fallthrough to addition
        configured_service.add()
def monitor_binding_equal(configured, actual):
    """Compare a configured monitor binding proxy against an actual NITRO binding.

    Returns True only when monitor name, service group name, and weight all
    match. Note the attribute-name asymmetry: the configured side uses
    ``monitorname`` while NITRO's actual binding uses ``monitor_name``.
    """
    same_monitor = configured.monitorname == actual.monitor_name
    same_group = configured.servicegroupname == actual.servicegroupname
    # NITRO reports weight as a string; normalize to float before comparing.
    same_weight = configured.weight == float(actual.weight)
    return same_monitor and same_group and same_weight
def get_configured_monitor_bindings(client, module):
    """Build a dict of monitorname -> ConfigProxy for playbook monitor bindings."""
    log('Entering get_configured_monitor_bindings')
    bindings = {}
    declared = module.params.get('monitorbindings')
    if declared is None:
        return bindings
    for binding in declared:
        # Copy so the module parameters are not mutated.
        values = copy.deepcopy(binding)
        values['servicegroupname'] = module.params['servicegroupname']
        proxy = ConfigProxy(
            actual=lbmonitor_servicegroup_binding(),
            client=client,
            attribute_values_dict=values,
            readwrite_attrs=[
                'monitorname',
                'servicegroupname',
                'weight',
            ],
            readonly_attrs=[],
        )
        bindings[values['monitorname']] = proxy
    return bindings
def get_actual_monitor_bindings(client, module):
    """Fetch monitor bindings present on the appliance, keyed by monitor name.

    Returns an empty dict when the service group has no monitor bindings;
    NITRO signals that case with errorcode 258 rather than a zero count.
    """
    log('Entering get_actual_monitor_bindings')
    bindings = {}
    try:
        # count() raises nitro exception instead of returning 0
        count = servicegroup_lbmonitor_binding.count(client, module.params['servicegroupname'])
    except nitro_exception as e:
        if e.errorcode == 258:
            return bindings
        else:
            raise
    if count == 0:
        return bindings
    # Fallthrough to rest of execution
    for binding in servicegroup_lbmonitor_binding.get(client, module.params['servicegroupname']):
        # Fixed log-message typo: was 'Gettign actual monitor with name %s'.
        log('Getting actual monitor with name %s' % binding.monitor_name)
        key = binding.monitor_name
        bindings[key] = binding
    return bindings
def monitor_bindings_identical(client, module):
    """Return True when configured monitor bindings match the appliance state.

    The NetScaler-supplied default monitors ('tcp-default', 'ping-default')
    are excluded from the key comparison, since they exist on the appliance
    even when the playbook declares no bindings.
    """
    log('Entering monitor_bindings_identical')
    configured_bindings = get_configured_monitor_bindings(client, module)
    actual_bindings = get_actual_monitor_bindings(client, module)
    configured_key_set = set(configured_bindings.keys())
    actual_key_set = set(actual_bindings.keys())
    # Keys present on only one side indicate a difference, except for the
    # implicit default monitors removed below.
    symmetrical_diff = configured_key_set ^ actual_key_set
    for default_monitor in ('tcp-default', 'ping-default'):
        if default_monitor in symmetrical_diff:
            log('Excluding %s monitor from key comparison' % default_monitor)
            symmetrical_diff.remove(default_monitor)
    if len(symmetrical_diff) > 0:
        return False
    # Compare key to key.
    # NOTE(review): a default monitor declared only on the configured side
    # would reach actual_bindings[key] below and raise KeyError — presumably
    # playbooks never declare the defaults; verify.
    for key in configured_key_set:
        configured_proxy = configured_bindings[key]
        # Follow nscli convention for missing weight value
        if not hasattr(configured_proxy, 'weight'):
            configured_proxy.weight = 1
        log('configured_proxy %s' % [configured_proxy.monitorname, configured_proxy.servicegroupname, configured_proxy.weight])
        log('actual_bindings %s' % [actual_bindings[key].monitor_name, actual_bindings[key].servicegroupname, actual_bindings[key].weight])
        if not monitor_binding_equal(configured_proxy, actual_bindings[key]):
            return False
    # Fallthrough to success
    return True
def sync_monitor_bindings(client, module):
    """Reconcile monitor bindings: delete stale/modified ones, then (re)add.

    The implicit default monitors ('tcp-default', 'ping-default') are never
    deleted — they are both removed from the actual set and re-checked
    before each delete call as a belt-and-suspenders guard.
    """
    log('Entering sync_monitor_bindings')
    actual_bindings = get_actual_monitor_bindings(client, module)
    # Exclude default monitors from deletion
    for monitorname in ('tcp-default', 'ping-default'):
        if monitorname in actual_bindings:
            del actual_bindings[monitorname]
    configured_bindings = get_configured_monitor_bindings(client, module)
    to_remove = list(set(actual_bindings.keys()) - set(configured_bindings.keys()))
    to_add = list(set(configured_bindings.keys()) - set(actual_bindings.keys()))
    # Bindings present on both sides are deleted and re-added, since NITRO
    # has no in-place update for monitor bindings.
    to_modify = list(set(configured_bindings.keys()) & set(actual_bindings.keys()))
    # Delete existing and modifiable bindings
    for key in to_remove + to_modify:
        binding = actual_bindings[key]
        b = lbmonitor_servicegroup_binding()
        b.monitorname = binding.monitor_name
        b.servicegroupname = module.params['servicegroupname']
        # Cannot remove default monitor bindings
        if b.monitorname in ('tcp-default', 'ping-default'):
            continue
        lbmonitor_servicegroup_binding.delete(client, b)
    # Add new and modified bindings
    for key in to_add + to_modify:
        binding = configured_bindings[key]
        log('Adding %s' % binding.monitorname)
        binding.add()
def diff(client, module, servicegroup_proxy):
    """Return the attribute differences between the configured proxy and the actual service group."""
    filter_expr = 'servicegroupname:%s' % module.params['servicegroupname']
    actual_groups = servicegroup.get_filtered(client, filter_expr)
    return servicegroup_proxy.diff_object(actual_groups[0])
def do_state_change(client, module, servicegroup_proxy):
    """Enable or disable the service group depending on the ``disabled``
    module parameter and return the NITRO API result object."""
    if module.params['disabled']:
        log('Disabling service')
        return servicegroup.disable(client, servicegroup_proxy.actual)
    log('Enabling service')
    return servicegroup.enable(client, servicegroup_proxy.actual)
def main():
    """Module entry point.

    Declares the Ansible module interface, logs in to the NITRO API and
    converges the service group, its service members and its monitor
    bindings to the requested state ('present' or 'absent'), running
    post-change sanity checks unless in check mode.
    """
    # Arguments that map 1:1 onto NITRO servicegroup attributes
    module_specific_arguments = dict(
        servicegroupname=dict(type='str'),
        servicetype=dict(
            type='str',
            choices=[
                'HTTP',
                'FTP',
                'TCP',
                'UDP',
                'SSL',
                'SSL_BRIDGE',
                'SSL_TCP',
                'DTLS',
                'NNTP',
                'RPCSVR',
                'DNS',
                'ADNS',
                'SNMP',
                'RTSP',
                'DHCPRA',
                'ANY',
                'SIP_UDP',
                'SIP_TCP',
                'SIP_SSL',
                'DNS_TCP',
                'ADNS_TCP',
                'MYSQL',
                'MSSQL',
                'ORACLE',
                'RADIUS',
                'RADIUSListener',
                'RDP',
                'DIAMETER',
                'SSL_DIAMETER',
                'TFTP',
                'SMPP',
                'PPTP',
                'GRE',
                'SYSLOGTCP',
                'SYSLOGUDP',
                'FIX',
                'SSL_FIX',
            ]
        ),
        cachetype=dict(
            type='str',
            choices=[
                'TRANSPARENT',
                'REVERSE',
                'FORWARD',
            ]
        ),
        maxclient=dict(type='float'),
        maxreq=dict(type='float'),
        cacheable=dict(type='bool'),
        cip=dict(
            type='str',
            choices=[
                'enabled',
                'disabled',
            ]
        ),
        cipheader=dict(type='str'),
        usip=dict(type='bool'),
        pathmonitor=dict(type='bool'),
        pathmonitorindv=dict(type='bool'),
        useproxyport=dict(type='bool'),
        healthmonitor=dict(type='bool'),
        sp=dict(type='bool'),
        rtspsessionidremap=dict(type='bool'),
        clttimeout=dict(type='float'),
        svrtimeout=dict(type='float'),
        cka=dict(type='bool'),
        tcpb=dict(type='bool'),
        cmp=dict(type='bool'),
        maxbandwidth=dict(type='float'),
        monthreshold=dict(type='float'),
        downstateflush=dict(
            type='str',
            choices=[
                'enabled',
                'disabled',
            ]
        ),
        tcpprofilename=dict(type='str'),
        httpprofilename=dict(type='str'),
        comment=dict(type='str'),
        appflowlog=dict(
            type='str',
            choices=[
                'enabled',
                'disabled',
            ]
        ),
        netprofile=dict(type='str'),
        autoscale=dict(
            type='str',
            choices=[
                'DISABLED',
                'DNS',
                'POLICY',
            ]
        ),
        memberport=dict(type='int'),
        graceful=dict(type='bool'),
    )
    # Arguments handled by this module itself, not forwarded to NITRO
    hand_inserted_arguments = dict(
        servicemembers=dict(type='list'),
        monitorbindings=dict(type='list'),
        disabled=dict(
            type='bool',
            default=False,
        ),
    )
    argument_spec = dict()
    argument_spec.update(netscaler_common_arguments)
    argument_spec.update(module_specific_arguments)
    argument_spec.update(hand_inserted_arguments)
    module = AnsibleModule(
        argument_spec=argument_spec,
        supports_check_mode=True,
    )
    module_result = dict(
        changed=False,
        failed=False,
        loglines=loglines,
    )
    # Fail the module if imports failed
    if not PYTHON_SDK_IMPORTED:
        module.fail_json(msg='Could not load nitro python sdk')
    # Fallthrough to rest of execution
    client = get_nitro_client(module)
    try:
        client.login()
    except nitro_exception as e:
        msg = "nitro exception during login. errorcode=%s, message=%s" % (str(e.errorcode), e.message)
        module.fail_json(msg=msg)
    except Exception as e:
        # Compare by type name to avoid a hard dependency on requests here
        if str(type(e)) == "<class 'requests.exceptions.ConnectionError'>":
            module.fail_json(msg='Connection error %s' % str(e))
        elif str(type(e)) == "<class 'requests.exceptions.SSLError'>":
            module.fail_json(msg='SSL Error %s' % str(e))
        else:
            module.fail_json(msg='Unexpected error during login %s' % str(e))
    # Instantiate service group configuration object
    # Attributes the module is allowed to set on the servicegroup
    readwrite_attrs = [
        'servicegroupname',
        'servicetype',
        'cachetype',
        'maxclient',
        'maxreq',
        'cacheable',
        'cip',
        'cipheader',
        'usip',
        'pathmonitor',
        'pathmonitorindv',
        'useproxyport',
        'healthmonitor',
        'sp',
        'rtspsessionidremap',
        'clttimeout',
        'svrtimeout',
        'cka',
        'tcpb',
        'cmp',
        'maxbandwidth',
        'monthreshold',
        'downstateflush',
        'tcpprofilename',
        'httpprofilename',
        'comment',
        'appflowlog',
        'netprofile',
        'autoscale',
        'memberport',
        'graceful',
    ]
    # Attributes only ever reported back by NITRO
    readonly_attrs = [
        'numofconnections',
        'serviceconftype',
        'value',
        'svrstate',
        'ip',
        'monstatcode',
        'monstatparam1',
        'monstatparam2',
        'monstatparam3',
        'statechangetimemsec',
        'stateupdatereason',
        'clmonowner',
        'clmonview',
        'groupcount',
        'riseapbrstatsmsgcode2',
        'serviceipstr',
        'servicegroupeffectivestate'
    ]
    # Attributes that cannot be changed after creation; the entity must be
    # deleted and recreated instead
    immutable_attrs = [
        'servicegroupname',
        'servicetype',
        'cachetype',
        'td',
        'cipheader',
        'state',
        'autoscale',
        'memberport',
        'servername',
        'port',
        'serverid',
        'monitor_name_svc',
        'dup_weight',
        'riseapbrstatsmsgcode',
        'delay',
        'graceful',
        'includemembers',
        'newname',
    ]
    # Conversions from Ansible-friendly values to NITRO's expected strings
    transforms = {
        'pathmonitorindv': ['bool_yes_no'],
        'cacheable': ['bool_yes_no'],
        'cka': ['bool_yes_no'],
        'pathmonitor': ['bool_yes_no'],
        'tcpb': ['bool_yes_no'],
        'sp': ['bool_on_off'],
        'usip': ['bool_yes_no'],
        'healthmonitor': ['bool_yes_no'],
        'useproxyport': ['bool_yes_no'],
        'rtspsessionidremap': ['bool_on_off'],
        'graceful': ['bool_yes_no'],
        'cmp': ['bool_yes_no'],
        'cip': [lambda v: v.upper()],
        'downstateflush': [lambda v: v.upper()],
        'appflowlog': [lambda v: v.upper()],
    }
    # Instantiate config proxy
    servicegroup_proxy = ConfigProxy(
        actual=servicegroup(),
        client=client,
        attribute_values_dict=module.params,
        readwrite_attrs=readwrite_attrs,
        readonly_attrs=readonly_attrs,
        immutable_attrs=immutable_attrs,
        transforms=transforms,
    )
    try:
        if module.params['state'] == 'present':
            log('Applying actions for state present')
            if not servicegroup_exists(client, module):
                if not module.check_mode:
                    log('Adding service group')
                    servicegroup_proxy.add()
                    if module.params['save_config']:
                        client.save_config()
                module_result['changed'] = True
            elif not servicegroup_identical(client, module, servicegroup_proxy):
                # Check if we try to change value of immutable attributes
                diff_dict = diff(client, module, servicegroup_proxy)
                immutables_changed = get_immutables_intersection(servicegroup_proxy, diff_dict.keys())
                if immutables_changed != []:
                    msg = 'Cannot update immutable attributes %s. Must delete and recreate entity.' % (immutables_changed,)
                    module.fail_json(msg=msg, diff=diff_dict, **module_result)
                if not module.check_mode:
                    servicegroup_proxy.update()
                    if module.params['save_config']:
                        client.save_config()
                module_result['changed'] = True
            else:
                module_result['changed'] = False
            # Check bindings
            if not monitor_bindings_identical(client, module):
                if not module.check_mode:
                    sync_monitor_bindings(client, module)
                    if module.params['save_config']:
                        client.save_config()
                module_result['changed'] = True
            if not servicemembers_identical(client, module):
                if not module.check_mode:
                    sync_service_members(client, module)
                    if module.params['save_config']:
                        client.save_config()
                module_result['changed'] = True
            # Apply the enabled/disabled flag last, after the group exists
            if not module.check_mode:
                res = do_state_change(client, module, servicegroup_proxy)
                if res.errorcode != 0:
                    msg = 'Error when setting disabled state. errorcode: %s message: %s' % (res.errorcode, res.message)
                    module.fail_json(msg=msg, **module_result)
            # Sanity check for state
            if not module.check_mode:
                log('Sanity checks for state present')
                if not servicegroup_exists(client, module):
                    module.fail_json(msg='Service group is not present', **module_result)
                if not servicegroup_identical(client, module, servicegroup_proxy):
                    module.fail_json(
                        msg='Service group is not identical to configuration',
                        diff=diff(client, module, servicegroup_proxy),
                        **module_result
                    )
                if not servicemembers_identical(client, module):
                    module.fail_json(msg='Service group members differ from configuration', **module_result)
                if not monitor_bindings_identical(client, module):
                    module.fail_json(msg='Monitor bindings are not identical', **module_result)
        elif module.params['state'] == 'absent':
            log('Applying actions for state absent')
            if servicegroup_exists(client, module):
                if not module.check_mode:
                    servicegroup_proxy.delete()
                    if module.params['save_config']:
                        client.save_config()
                module_result['changed'] = True
            else:
                module_result['changed'] = False
            # Sanity check for state
            if not module.check_mode:
                log('Sanity checks for state absent')
                if servicegroup_exists(client, module):
                    module.fail_json(msg='Service group is present', **module_result)
    except nitro_exception as e:
        msg = "nitro exception errorcode=" + str(e.errorcode) + ",message=" + e.message
        module.fail_json(msg=msg, **module_result)
    client.logout()
    module.exit_json(**module_result)
# Script entry point when executed directly by Ansible
if __name__ == "__main__":
    main()
#
# test_codecmaps_kr.py
# Codec mapping tests for ROK encodings
#
from test import support
from test import multibytecodec_support
import unittest
class TestCP949Map(multibytecodec_support.TestBase_Mapping,
                   unittest.TestCase):
    """Check the cp949 codec against the vendor-published mapping table."""
    encoding = 'cp949'
    mapfileurl = 'http://www.unicode.org/Public/MAPPINGS/VENDORS/MICSFT' \
                 '/WINDOWS/CP949.TXT'
class TestEUCKRMap(multibytecodec_support.TestBase_Mapping,
                   unittest.TestCase):
    """Check the euc_kr codec against its reference mapping table."""
    encoding = 'euc_kr'
    mapfileurl = 'http://people.freebsd.org/~perky/i18n/EUC-KR.TXT'
    # A4D4 HANGUL FILLER indicates the begin of 8-bytes make-up sequence.
    pass_enctest = [(b'\xa4\xd4', '\u3164')]
    pass_dectest = [(b'\xa4\xd4', '\u3164')]
class TestJOHABMap(multibytecodec_support.TestBase_Mapping,
                   unittest.TestCase):
    """Check the johab codec against the (obsolete) KSC mapping table."""
    encoding = 'johab'
    mapfileurl = 'http://www.unicode.org/Public/MAPPINGS/OBSOLETE/EASTASIA/' \
                 'KSC/JOHAB.TXT'
    # KS X 1001 standard assigned 0x5c as WON SIGN.
    # but, in early 90s that is the only era used johab widely,
    # the most softwares implements it as REVERSE SOLIDUS.
    # So, we ignore the standard here.
    pass_enctest = [(b'\\', '\u20a9')]
    pass_dectest = [(b'\\', '\u20a9')]
def test_main():
    """Run every TestCase defined in this module."""
    support.run_unittest(__name__)
# Allow running this test module standalone
if __name__ == "__main__":
    test_main()
# pylint: skip-file
# flake8: noqa
def main():
    '''
    ansible oadm module for manage-node
    '''
    # Module interface: node/selector are two mutually exclusive ways of
    # choosing the target hosts, and at least one of them is required.
    argument_spec = dict(
        debug=dict(default=False, type='bool'),
        kubeconfig=dict(default='/etc/origin/master/admin.kubeconfig', type='str'),
        node=dict(default=None, type='list'),
        selector=dict(default=None, type='str'),
        pod_selector=dict(default=None, type='str'),
        schedulable=dict(default=None, type='bool'),
        list_pods=dict(default=False, type='bool'),
        evacuate=dict(default=False, type='bool'),
        dry_run=dict(default=False, type='bool'),
        force=dict(default=False, type='bool'),
        grace_period=dict(default=None, type='int'),
    )
    module = AnsibleModule(
        argument_spec=argument_spec,
        mutually_exclusive=[["selector", "node"], ['evacuate', 'list_pods'], ['list_pods', 'schedulable']],
        required_one_of=[["node", "selector"]],
        supports_check_mode=True,
    )
    # Delegate all the work to the ManageNode implementation
    results = ManageNode.run_ansible(module.params, module.check_mode)
    if 'failed' in results:
        module.fail_json(**results)
    module.exit_json(**results)
# Script entry point when executed directly by Ansible
if __name__ == "__main__":
    main()
use serde_derive::Serialize;
#[derive(Serialize)] // compile-fail UI fixture: derive must reject the attrs below
enum Enum {
    #[serde(serialize_with = "serialize_some_newtype_variant")] // custom serializer for the whole variant
    Struct {
        #[serde(skip_serializing_if = "always")] // field-level attr conflicts with serialize_with above
        f1: String,
        f2: u8,
    },
}
fn main() {} | rust | github | https://github.com/serde-rs/serde | test_suite/tests/ui/with-variant/skip_ser_struct_field_if.rs |
/*!
* @license
* Copyright Google LLC All Rights Reserved.
*
* Use of this source code is governed by an MIT-style license that can be
* found in the LICENSE file at https://angular.dev/license
*/
/**
 * CodeMirror theme spec mapping editor selectors to CSS custom properties,
 * so the editor follows the application's light/dark theme variables.
 */
export const CODE_EDITOR_THEME_STYLES = {
  // Editor root container
  '&': {
    position: 'relative',
    width: '100%',
    height: '100%',
    'background-color': 'var(--code-editor-background)',
    color: 'var(--code-editor-text-base-color)',
  },
  // Gutters (line numbers, fold markers)
  '.cm-gutters': {
    border: 'none',
  },
  '.cm-gutter': {
    'background-color': 'var(--code-editor-background)',
    color: 'var(--code-editor-code-base-color)',
  },
  // Active-line highlight, in both the content area and the gutter
  '.cm-line.cm-activeLine': {
    'background-color': 'var(--code-editor-active-line-background)',
  },
  '.cm-activeLineGutter': {
    'background-color': 'var(--code-editor-active-line-background)',
  },
  // Text selection, focused and unfocused
  '&.cm-focused .cm-selectionBackground': {
    'background-color': 'var(--code-editor-focused-selection-background) !important',
  },
  '.cm-selectionBackground': {
    'background-color': 'var(--code-editor-selection-background) !important',
  },
  '.cm-cursor': {
    'border-color': 'var(--code-editor-cursor-color)',
  },
  // Hover/autocomplete tooltips
  '.cm-tooltip': {
    color: 'var(--code-editor-tooltip-color)',
    border: 'var(--code-editor-tooltip-border)',
    'border-radius': 'var(--code-editor-tooltip-border-radius)',
    background: 'var(--code-editor-tooltip-background)',
    'overflow-y': 'scroll',
    'max-height': '70%',
    'max-width': '100%',
  },
  '.cm-tooltip.cm-tooltip-autocomplete > ul': {
    background: 'var(--code-editor-autocomplete-background)',
  },
  // Syntax-highlighted fragments inside tooltips
  '.cm-tooltip .keyword': {
    color: 'var(--code-module-keyword)',
  },
  '.cm-tooltip .aliasName': {
    color: 'var(--code-variable-name)',
  },
  '.cm-tooltip .localName': {
    color: 'var(--code-variable-name)',
  },
  '.cm-tooltip-autocomplete ul li[aria-selected]': {
    background: 'var(--code-editor-autocomplete-item-background)',
    color: 'var(--code-editor-autocomplete-item-color)',
  },
  // Lint diagnostics tooltip
  '.cm-tooltip-lint': {
    background: 'var(--code-editor-lint-tooltip-background)',
    color: 'var(--code-editor-lint-tooltip-color)',
  },
  // Search/replace and other panels
  '.cm-panels': {
    background: 'var(--code-editor-panels-background)',
    color: 'var(--code-editor-panels-color)',
  },
  '.cm-foldPlaceholder': {
    background: 'var(--code-editor-fold-placeholder-background)',
  },
};
# -*- coding: utf-8 -*-
# ******************************************************************************
#
# Copyright (C) 2009-2011 Olivier Tilloy <olivier@tilloy.net>
#
# This file is part of the pyexiv2 distribution.
#
# pyexiv2 is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# pyexiv2 is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with pyexiv2; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, 5th Floor, Boston, MA 02110-1301 USA.
#
# Author: Olivier Tilloy <olivier@tilloy.net>
#
# ******************************************************************************
from pyexiv2.metadata import ImageMetadata
from pyexiv2.exif import ExifTag
from pyexiv2.iptc import IptcTag
from pyexiv2.xmp import XmpTag
from pyexiv2.utils import FixedOffset, make_fraction
import datetime
import os
import tempfile
import time
import unittest
from testutils import EMPTY_JPG_DATA
class TestImageMetadata(unittest.TestCase):
    def setUp(self):
        """Create a temporary JPEG with known EXIF/IPTC/XMP tags and comment,
        then expose a fresh, not-yet-read ImageMetadata handle on it."""
        # Create an empty image file
        fd, self.pathname = tempfile.mkstemp(suffix='.jpg')
        os.write(fd, EMPTY_JPG_DATA)
        os.close(fd)
        # Write some metadata
        m = ImageMetadata(self.pathname)
        m.read()
        m['Exif.Image.Make'] = 'EASTMAN KODAK COMPANY'
        m['Exif.Image.DateTime'] = datetime.datetime(2009, 2, 9, 13, 33, 20)
        m['Iptc.Application2.Caption'] = ['blabla']
        m['Iptc.Application2.DateCreated'] = [datetime.date(2004, 7, 13)]
        m['Xmp.dc.format'] = ('image', 'jpeg')
        m['Xmp.dc.subject'] = ['image', 'test', 'pyexiv2']
        m.comment = 'Hello World!'
        m.write()
        # Each test starts from an unread handle on the same file
        self.metadata = ImageMetadata(self.pathname)
    def tearDown(self):
        """Remove the temporary image created by setUp."""
        os.remove(self.pathname)
######################
# Test general methods
######################
    def test_not_read_raises(self):
        """Every accessor must raise IOError until read() has been called."""
        # http://bugs.launchpad.net/pyexiv2/+bug/687373
        self.assertRaises(IOError, self.metadata.write)
        self.assertRaises(IOError, getattr, self.metadata, 'dimensions')
        self.assertRaises(IOError, getattr, self.metadata, 'mime_type')
        self.assertRaises(IOError, getattr, self.metadata, 'exif_keys')
        self.assertRaises(IOError, getattr, self.metadata, 'iptc_keys')
        self.assertRaises(IOError, getattr, self.metadata, 'xmp_keys')
        self.assertRaises(IOError, self.metadata._get_exif_tag, 'Exif.Image.Make')
        self.assertRaises(IOError, self.metadata._get_iptc_tag, 'Iptc.Application2.Caption')
        self.assertRaises(IOError, self.metadata._get_xmp_tag, 'Xmp.dc.format')
        self.assertRaises(IOError, self.metadata._set_exif_tag, 'Exif.Image.Make', 'foobar')
        self.assertRaises(IOError, self.metadata._set_iptc_tag, 'Iptc.Application2.Caption', ['foobar'])
        self.assertRaises(IOError, self.metadata._set_xmp_tag, 'Xmp.dc.format', ('foo', 'bar'))
        self.assertRaises(IOError, self.metadata._delete_exif_tag, 'Exif.Image.Make')
        self.assertRaises(IOError, self.metadata._delete_iptc_tag, 'Iptc.Application2.Caption')
        self.assertRaises(IOError, self.metadata._delete_xmp_tag, 'Xmp.dc.format')
        self.assertRaises(IOError, getattr, self.metadata, 'comment')
        self.assertRaises(IOError, setattr, self.metadata, 'comment', 'foobar')
        self.assertRaises(IOError, delattr, self.metadata, 'comment')
        self.assertRaises(IOError, getattr, self.metadata, 'previews')
        other = ImageMetadata(self.pathname)
        self.assertRaises(IOError, self.metadata.copy, other)
        self.assertRaises(IOError, getattr, self.metadata, 'buffer')
        # The EXIF thumbnail proxy is accessible, but all of its operations
        # must also fail before read()
        thumb = self.metadata.exif_thumbnail
        self.assertRaises(IOError, getattr, thumb, 'mime_type')
        self.assertRaises(IOError, getattr, thumb, 'extension')
        self.assertRaises(IOError, thumb.write_to_file, '/tmp/foobar.jpg')
        self.assertRaises(IOError, thumb.erase)
        self.assertRaises(IOError, thumb.set_from_file, '/tmp/foobar.jpg')
        self.assertRaises(IOError, getattr, thumb, 'data')
        self.assertRaises(IOError, setattr, thumb, 'data', EMPTY_JPG_DATA)
        self.assertRaises(IOError, getattr, self.metadata, 'iptc_charset')
def test_read(self):
self.assertRaises(IOError, getattr, self.metadata, '_image')
self.metadata.read()
self.failIfEqual(self.metadata._image, None)
def test_read_nonexistent_file(self):
metadata = ImageMetadata('idontexist')
self.failUnlessRaises(IOError, metadata.read)
def test_write_preserve_timestamps(self):
stat = os.stat(self.pathname)
atime = round(stat.st_atime)
mtime = round(stat.st_mtime)
metadata = ImageMetadata(self.pathname)
metadata.read()
metadata.comment = 'Yellow Submarine'
time.sleep(1.1)
metadata.write(preserve_timestamps=True)
stat2 = os.stat(self.pathname)
atime2 = round(stat2.st_atime)
mtime2 = round(stat2.st_mtime)
self.failUnlessEqual(atime2, atime)
self.failUnlessEqual(mtime2, mtime)
def test_write_dont_preserve_timestamps(self):
stat = os.stat(self.pathname)
atime = round(stat.st_atime)
mtime = round(stat.st_mtime)
metadata = ImageMetadata(self.pathname)
metadata.read()
metadata.comment = 'Yellow Submarine'
time.sleep(1.1)
metadata.write()
stat2 = os.stat(self.pathname)
atime2 = round(stat2.st_atime)
mtime2 = round(stat2.st_mtime)
# It is not safe to assume that atime will have been modified when the
# file has been read, as it may depend on mount options (e.g. noatime,
# relatime).
# See discussion at http://bugs.launchpad.net/pyexiv2/+bug/624999.
#self.failIfEqual(atime2, atime)
self.failIfEqual(mtime2, mtime)
metadata.comment = 'Yesterday'
time.sleep(1.1)
metadata.write(preserve_timestamps=True)
stat3 = os.stat(self.pathname)
atime3 = round(stat3.st_atime)
mtime3 = round(stat3.st_mtime)
self.failUnlessEqual(atime3, atime2)
self.failUnlessEqual(mtime3, mtime2)
###########################
# Test EXIF-related methods
###########################
def test_exif_keys(self):
self.metadata.read()
self.assertEqual(self.metadata._keys['exif'], None)
keys = self.metadata.exif_keys
self.assertEqual(len(keys), 2)
self.assertEqual(self.metadata._keys['exif'], keys)
def test_get_exif_tag(self):
self.metadata.read()
self.assertEqual(self.metadata._tags['exif'], {})
# Get an existing tag
key = 'Exif.Image.Make'
tag = self.metadata._get_exif_tag(key)
self.assert_(isinstance(tag, ExifTag))
self.assertEqual(self.metadata._tags['exif'][key], tag)
# Try to get an nonexistent tag
key = 'Exif.Photo.Sharpness'
self.failUnlessRaises(KeyError, self.metadata._get_exif_tag, key)
def test_set_exif_tag_wrong(self):
self.metadata.read()
self.assertEqual(self.metadata._tags['exif'], {})
# Try to set a tag with wrong type
tag = 'Not an exif tag'
self.failUnlessRaises(TypeError, self.metadata._set_exif_tag, tag)
self.assertEqual(self.metadata._tags['exif'], {})
def test_set_exif_tag_create(self):
self.metadata.read()
self.assertEqual(self.metadata._tags['exif'], {})
# Create a new tag
tag = ExifTag('Exif.Thumbnail.Orientation', 1)
self.assert_(tag.key not in self.metadata.exif_keys)
self.metadata._set_exif_tag(tag.key, tag)
self.assert_(tag.key in self.metadata.exif_keys)
self.assertEqual(self.metadata._tags['exif'], {tag.key: tag})
self.assert_(tag.key in self.metadata._image._exifKeys())
self.assertEqual(self.metadata._image._getExifTag(tag.key)._getRawValue(),
tag.raw_value)
def test_set_exif_tag_overwrite(self):
self.metadata.read()
self.assertEqual(self.metadata._tags['exif'], {})
# Overwrite an existing tag
tag = ExifTag('Exif.Image.DateTime', datetime.datetime(2009, 3, 20, 20, 32, 0))
self.metadata._set_exif_tag(tag.key, tag)
self.assertEqual(self.metadata._tags['exif'], {tag.key: tag})
self.assert_(tag.key in self.metadata._image._exifKeys())
self.assertEqual(self.metadata._image._getExifTag(tag.key)._getRawValue(),
tag.raw_value)
def test_set_exif_tag_overwrite_already_cached(self):
self.metadata.read()
self.assertEqual(self.metadata._tags['exif'], {})
# Overwrite an existing tag already cached
key = 'Exif.Image.Make'
tag = self.metadata._get_exif_tag(key)
self.assertEqual(self.metadata._tags['exif'][key], tag)
new_tag = ExifTag(key, 'World Company')
self.metadata._set_exif_tag(key, new_tag)
self.assertEqual(self.metadata._tags['exif'], {key: new_tag})
self.assert_(key in self.metadata._image._exifKeys())
self.assertEqual(self.metadata._image._getExifTag(key)._getRawValue(),
new_tag.raw_value)
def test_set_exif_tag_direct_value_assignment(self):
self.metadata.read()
self.assertEqual(self.metadata._tags['exif'], {})
# Direct value assignment: pass a value instead of a fully-formed tag
key = 'Exif.Thumbnail.Orientation'
value = 1
self.metadata._set_exif_tag(key, value)
self.assert_(key in self.metadata.exif_keys)
self.assert_(key in self.metadata._image._exifKeys())
tag = self.metadata._get_exif_tag(key)
self.assertEqual(tag.value, value)
self.assertEqual(self.metadata._tags['exif'], {key: tag})
self.assertEqual(self.metadata._image._getExifTag(key)._getRawValue(),
tag.raw_value)
def test_delete_exif_tag_inexistent(self):
self.metadata.read()
key = 'Exif.Image.Artist'
self.failUnlessRaises(KeyError, self.metadata._delete_exif_tag, key)
def test_delete_exif_tag_not_cached(self):
self.metadata.read()
key = 'Exif.Image.DateTime'
self.assertEqual(self.metadata._tags['exif'], {})
self.assert_(key in self.metadata.exif_keys)
self.metadata._delete_exif_tag(key)
self.assertEqual(self.metadata._tags['exif'], {})
self.failIf(key in self.metadata.exif_keys)
def test_delete_exif_tag_cached(self):
self.metadata.read()
key = 'Exif.Image.DateTime'
self.assert_(key in self.metadata.exif_keys)
tag = self.metadata._get_exif_tag(key)
self.assertEqual(self.metadata._tags['exif'][key], tag)
self.metadata._delete_exif_tag(key)
self.assertEqual(self.metadata._tags['exif'], {})
self.failIf(key in self.metadata.exif_keys)
###########################
# Test IPTC-related methods
###########################
def test_iptc_keys(self):
self.metadata.read()
self.assertEqual(self.metadata._keys['iptc'], None)
keys = self.metadata.iptc_keys
self.assertEqual(len(keys), 2)
self.assertEqual(self.metadata._keys['iptc'], keys)
def test_get_iptc_tag(self):
self.metadata.read()
self.assertEqual(self.metadata._tags['iptc'], {})
# Get an existing tag
key = 'Iptc.Application2.DateCreated'
tag = self.metadata._get_iptc_tag(key)
self.assert_(isinstance(tag, IptcTag))
self.assertEqual(self.metadata._tags['iptc'][key], tag)
# Try to get an nonexistent tag
key = 'Iptc.Application2.Copyright'
self.failUnlessRaises(KeyError, self.metadata._get_iptc_tag, key)
def test_set_iptc_tag_wrong(self):
self.metadata.read()
self.assertEqual(self.metadata._tags['iptc'], {})
# Try to set a tag with wrong type
tag = 'Not an iptc tag'
self.failUnlessRaises(TypeError, self.metadata._set_iptc_tag, tag)
self.assertEqual(self.metadata._tags['iptc'], {})
def test_set_iptc_tag_create(self):
self.metadata.read()
self.assertEqual(self.metadata._tags['iptc'], {})
# Create a new tag
tag = IptcTag('Iptc.Application2.Writer', ['Nobody'])
self.assert_(tag.key not in self.metadata.iptc_keys)
self.metadata._set_iptc_tag(tag.key, tag)
self.assert_(tag.key in self.metadata.iptc_keys)
self.assertEqual(self.metadata._tags['iptc'], {tag.key: tag})
self.assert_(tag.key in self.metadata._image._iptcKeys())
self.assertEqual(self.metadata._image._getIptcTag(tag.key)._getRawValues(),
tag.raw_value)
def test_set_iptc_tag_overwrite(self):
self.metadata.read()
self.assertEqual(self.metadata._tags['iptc'], {})
# Overwrite an existing tag
tag = IptcTag('Iptc.Application2.Caption', ['A picture.'])
self.metadata._set_iptc_tag(tag.key, tag)
self.assertEqual(self.metadata._tags['iptc'], {tag.key: tag})
self.assert_(tag.key in self.metadata._image._iptcKeys())
self.assertEqual(self.metadata._image._getIptcTag(tag.key)._getRawValues(),
tag.raw_value)
def test_set_iptc_tag_overwrite_already_cached(self):
self.metadata.read()
self.assertEqual(self.metadata._tags['iptc'], {})
# Overwrite an existing tag already cached
key = 'Iptc.Application2.Caption'
tag = self.metadata._get_iptc_tag(key)
self.assertEqual(self.metadata._tags['iptc'][key], tag)
new_tag = IptcTag(key, ['A picture.'])
self.metadata._set_iptc_tag(key, new_tag)
self.assertEqual(self.metadata._tags['iptc'], {key: new_tag})
self.assert_(key in self.metadata._image._iptcKeys())
self.assertEqual(self.metadata._image._getIptcTag(key)._getRawValues(),
new_tag.raw_value)
def test_set_iptc_tag_direct_value_assignment(self):
self.metadata.read()
self.assertEqual(self.metadata._tags['iptc'], {})
# Direct value assignment: pass a value instead of a fully-formed tag
key = 'Iptc.Application2.Writer'
values = ['Nobody']
self.metadata._set_iptc_tag(key, values)
self.assert_(key in self.metadata.iptc_keys)
self.assert_(key in self.metadata._image._iptcKeys())
tag = self.metadata._get_iptc_tag(key)
self.assertEqual(tag.value, values)
self.assertEqual(self.metadata._tags['iptc'], {key: tag})
self.assertEqual(self.metadata._image._getIptcTag(key)._getRawValues(),
tag.raw_value)
def test_delete_iptc_tag_inexistent(self):
self.metadata.read()
key = 'Iptc.Application2.LocationCode'
self.failUnlessRaises(KeyError, self.metadata._delete_iptc_tag, key)
def test_delete_iptc_tag_not_cached(self):
self.metadata.read()
key = 'Iptc.Application2.Caption'
self.assertEqual(self.metadata._tags['iptc'], {})
self.assert_(key in self.metadata.iptc_keys)
self.metadata._delete_iptc_tag(key)
self.assertEqual(self.metadata._tags['iptc'], {})
self.failIf(key in self.metadata.iptc_keys)
def test_delete_iptc_tag_cached(self):
self.metadata.read()
key = 'Iptc.Application2.Caption'
self.assert_(key in self.metadata.iptc_keys)
tag = self.metadata._get_iptc_tag(key)
self.assertEqual(self.metadata._tags['iptc'][key], tag)
self.metadata._delete_iptc_tag(key)
self.assertEqual(self.metadata._tags['iptc'], {})
self.failIf(key in self.metadata.iptc_keys)
##########################
# Test XMP-related methods
##########################
def test_xmp_keys(self):
self.metadata.read()
self.assertEqual(self.metadata._keys['xmp'], None)
keys = self.metadata.xmp_keys
self.assertEqual(len(keys), 2)
self.assertEqual(self.metadata._keys['xmp'], keys)
def test_get_xmp_tag(self):
self.metadata.read()
self.assertEqual(self.metadata._tags['xmp'], {})
# Get an existing tag
key = 'Xmp.dc.subject'
tag = self.metadata._get_xmp_tag(key)
self.assert_(isinstance(tag, XmpTag))
self.assertEqual(self.metadata._tags['xmp'][key], tag)
# Try to get an nonexistent tag
key = 'Xmp.xmp.Label'
self.failUnlessRaises(KeyError, self.metadata._get_xmp_tag, key)
def test_set_xmp_tag_wrong(self):
self.metadata.read()
self.assertEqual(self.metadata._tags['xmp'], {})
# Try to set a tag with wrong type
tag = 'Not an xmp tag'
self.failUnlessRaises(TypeError, self.metadata._set_xmp_tag, tag)
self.assertEqual(self.metadata._tags['xmp'], {})
def test_set_xmp_tag_create(self):
self.metadata.read()
self.assertEqual(self.metadata._tags['xmp'], {})
# Create a new tag
tag = XmpTag('Xmp.dc.title', {'x-default': 'This is not a title',
'fr-FR': "Ceci n'est pas un titre"})
self.assert_(tag.key not in self.metadata.xmp_keys)
self.metadata._set_xmp_tag(tag.key, tag)
self.assert_(tag.key in self.metadata.xmp_keys)
self.assertEqual(self.metadata._tags['xmp'], {tag.key: tag})
self.assert_(tag.key in self.metadata._image._xmpKeys())
self.assertEqual(self.metadata._image._getXmpTag(tag.key)._getLangAltValue(),
tag.raw_value)
def test_set_xmp_tag_overwrite(self):
self.metadata.read()
self.assertEqual(self.metadata._tags['xmp'], {})
# Overwrite an existing tag
tag = XmpTag('Xmp.dc.format', ('image', 'png'))
self.metadata._set_xmp_tag(tag.key, tag)
self.assertEqual(self.metadata._tags['xmp'], {tag.key: tag})
self.assert_(tag.key in self.metadata._image._xmpKeys())
self.assertEqual(self.metadata._image._getXmpTag(tag.key)._getTextValue(),
tag.raw_value)
def test_set_xmp_tag_overwrite_already_cached(self):
self.metadata.read()
self.assertEqual(self.metadata._tags['xmp'], {})
# Overwrite an existing tag already cached
key = 'Xmp.dc.subject'
tag = self.metadata._get_xmp_tag(key)
self.assertEqual(self.metadata._tags['xmp'][key], tag)
new_tag = XmpTag(key, ['hello', 'world'])
self.metadata._set_xmp_tag(key, new_tag)
self.assertEqual(self.metadata._tags['xmp'], {key: new_tag})
self.assert_(key in self.metadata._image._xmpKeys())
self.assertEqual(self.metadata._image._getXmpTag(key)._getArrayValue(),
new_tag.raw_value)
def test_set_xmp_tag_direct_value_assignment(self):
self.metadata.read()
self.assertEqual(self.metadata._tags['xmp'], {})
# Direct value assignment: pass a value instead of a fully-formed tag
key = 'Xmp.dc.title'
value = {'x-default': 'This is not a title',
'fr-FR': "Ceci n'est pas un titre"}
self.metadata._set_xmp_tag(key, value)
self.assert_(key in self.metadata.xmp_keys)
self.assert_(key in self.metadata._image._xmpKeys())
tag = self.metadata._get_xmp_tag(key)
self.assertEqual(tag.value, value)
self.assertEqual(self.metadata._tags['xmp'], {key: tag})
self.assertEqual(self.metadata._image._getXmpTag(key)._getLangAltValue(), tag.raw_value)
def test_delete_xmp_tag_inexistent(self):
self.metadata.read()
key = 'Xmp.xmp.CreatorTool'
self.failUnlessRaises(KeyError, self.metadata._delete_xmp_tag, key)
def test_delete_xmp_tag_not_cached(self):
self.metadata.read()
key = 'Xmp.dc.subject'
self.assertEqual(self.metadata._tags['xmp'], {})
self.assert_(key in self.metadata.xmp_keys)
self.metadata._delete_xmp_tag(key)
self.assertEqual(self.metadata._tags['xmp'], {})
self.failIf(key in self.metadata.xmp_keys)
def test_delete_xmp_tag_cached(self):
self.metadata.read()
key = 'Xmp.dc.subject'
self.assert_(key in self.metadata.xmp_keys)
tag = self.metadata._get_xmp_tag(key)
self.assertEqual(self.metadata._tags['xmp'][key], tag)
self.metadata._delete_xmp_tag(key)
self.assertEqual(self.metadata._tags['xmp'], {})
self.failIf(key in self.metadata.xmp_keys)
###########################
# Test dictionary interface
###########################
def test_getitem(self):
    """__getitem__ returns the family-specific tag type, or raises KeyError."""
    self.metadata.read()
    # Get existing tags: each metadata family yields its own tag class.
    key = 'Exif.Image.DateTime'
    tag = self.metadata[key]
    self.assertTrue(isinstance(tag, ExifTag))
    key = 'Iptc.Application2.Caption'
    tag = self.metadata[key]
    self.assertTrue(isinstance(tag, IptcTag))
    key = 'Xmp.dc.format'
    tag = self.metadata[key]
    self.assertTrue(isinstance(tag, XmpTag))
    # Try to get nonexistent tags: KeyError is expected for every family,
    # including an unknown family name.
    keys = ('Exif.Image.SamplesPerPixel', 'Iptc.Application2.FixtureId',
            'Xmp.xmp.Rating', 'Wrong.Noluck.Raise')
    for key in keys:
        self.assertRaises(KeyError, self.metadata.__getitem__, key)

def test_setitem(self):
    """__setitem__ stores new tags and replaces existing ones, per family."""
    self.metadata.read()
    # Set new tags
    key = 'Exif.Photo.ExposureBiasValue'
    tag = ExifTag(key, make_fraction(0, 3))
    self.metadata[key] = tag
    self.assertTrue(key in self.metadata._tags['exif'])
    self.assertEqual(self.metadata._tags['exif'][key], tag)
    key = 'Iptc.Application2.City'
    tag = IptcTag(key, ['Barcelona'])
    self.metadata[key] = tag
    self.assertTrue(key in self.metadata._tags['iptc'])
    self.assertEqual(self.metadata._tags['iptc'][key], tag)
    key = 'Xmp.dc.description'
    tag = XmpTag(key, {'x-default': 'Sunset picture.'})
    self.metadata[key] = tag
    self.assertTrue(key in self.metadata._tags['xmp'])
    self.assertEqual(self.metadata._tags['xmp'][key], tag)
    # Replace existing tags
    key = 'Exif.Photo.ExifVersion'
    tag = ExifTag(key, '0220')
    self.metadata[key] = tag
    self.assertTrue(key in self.metadata._tags['exif'])
    self.assertEqual(self.metadata._tags['exif'][key], tag)
    key = 'Iptc.Application2.Caption'
    tag = IptcTag(key, ['Sunset on Barcelona.'])
    self.metadata[key] = tag
    self.assertTrue(key in self.metadata._tags['iptc'])
    self.assertEqual(self.metadata._tags['iptc'][key], tag)
    key = 'Xmp.dc.subject'
    tag = XmpTag(key, ['sunset', 'Barcelona', 'beautiful', 'beach'])
    self.metadata[key] = tag
    self.assertTrue(key in self.metadata._tags['xmp'])
    self.assertEqual(self.metadata._tags['xmp'][key], tag)

def test_delitem(self):
    """__delitem__ removes tags from both the key cache and the tag cache."""
    self.metadata.read()
    # Delete existing tags
    key = 'Exif.Image.Make'
    del self.metadata[key]
    self.assertFalse(key in self.metadata._keys['exif'])
    self.assertFalse(key in self.metadata._tags['exif'])
    key = 'Iptc.Application2.Caption'
    del self.metadata[key]
    self.assertFalse(key in self.metadata._keys['iptc'])
    self.assertFalse(key in self.metadata._tags['iptc'])
    key = 'Xmp.dc.subject'
    del self.metadata[key]
    self.assertFalse(key in self.metadata._keys['xmp'])
    self.assertFalse(key in self.metadata._tags['xmp'])
    # Try to delete nonexistent tags: KeyError expected for every family.
    keys = ('Exif.Image.SamplesPerPixel', 'Iptc.Application2.FixtureId',
            'Xmp.xmp.Rating', 'Wrong.Noluck.Raise')
    for key in keys:
        self.assertRaises(KeyError, self.metadata.__delitem__, key)

def test_replace_tag_by_itself(self):
    """Replacing an existing tag by itself must not crash.

    Regression test for a segmentation fault, see
    https://bugs.launchpad.net/pyexiv2/+bug/622739.
    """
    self.metadata.read()
    keys = self.metadata.exif_keys + \
           self.metadata.iptc_keys + \
           self.metadata.xmp_keys
    for key in keys:
        self.metadata[key] = self.metadata[key]

def test_nonexistent_tag_family(self):
    """All dictionary operations raise KeyError for an unknown tag family."""
    self.metadata.read()
    key = 'Bleh.Image.DateTime'
    self.assertRaises(KeyError, self.metadata.__getitem__, key)
    self.assertRaises(KeyError, self.metadata.__setitem__, key, datetime.date.today())
    self.assertRaises(KeyError, self.metadata.__delitem__, key)
##########################
# Test the image comment #
##########################
def test_get_comment(self):
    """The comment property exposes the image comment read from the file."""
    self.metadata.read()
    self.assertEqual(self.metadata.comment, 'Hello World!')

def test_set_comment(self):
    """Setting the comment overwrites it; setting None clears it."""
    self.metadata.read()
    comment = 'Welcome to the real world.'
    self.metadata.comment = comment
    self.assertEqual(self.metadata.comment, comment)
    self.metadata.comment = None
    self.assertEqual(self.metadata.comment, '')

def test_delete_comment(self):
    """Deleting the comment resets it to the empty string."""
    self.metadata.read()
    del self.metadata.comment
    self.assertEqual(self.metadata.comment, '')
####################
# Test metadata copy
####################
def _set_up_other(self):
    # A second, empty in-memory image to copy the metadata over to.
    self.other = ImageMetadata.from_buffer(EMPTY_JPG_DATA)

def test_copy_metadata(self):
    """copy() transfers all tag families and the comment to another image."""
    self.metadata.read()
    self._set_up_other()
    self.other.read()
    families = ('exif', 'iptc', 'xmp')
    # The target image starts out without any tags at all.
    for family in families:
        self.assertEqual(getattr(self.other, '%s_keys' % family), [])
    self.metadata.copy(self.other)
    # Reuse the same families tuple (the original rebuilt it inline here).
    for family in families:
        # The target's lazy caches are invalidated by the copy ...
        self.assertEqual(self.other._keys[family], None)
        self.assertEqual(self.other._tags[family], {})
        # ... but the underlying image and the accessors report the same
        # keys as the source.
        keys = getattr(self.metadata, '%s_keys' % family)
        self.assertEqual(getattr(self.other._image, '_%sKeys' % family)(), keys)
        self.assertEqual(getattr(self.other, '%s_keys' % family), keys)
    # Every tag value must have been copied over verbatim.  A single loop
    # over the concatenated key lists preserves the original per-family
    # check order (exif, then iptc, then xmp).
    all_keys = self.metadata.exif_keys + self.metadata.iptc_keys + \
               self.metadata.xmp_keys
    for key in all_keys:
        self.assertEqual(self.metadata[key].value, self.other[key].value)
    self.assertEqual(self.metadata.comment, self.other.comment)
#############################
# Test MutableMapping methods
#############################
def _set_up_clean(self):
    # A pristine in-memory image, guaranteed to carry no metadata.
    self.clean = ImageMetadata.from_buffer(EMPTY_JPG_DATA)

def test_mutablemapping(self):
    """ImageMetadata supports the MutableMapping protocol.

    Exercises len(), the ``in`` operator, get() with a default, and
    clear() across all three tag families.
    """
    self._set_up_clean()
    self.clean.read()
    self.assertEqual(len(self.clean), 0)
    self.assertTrue('Exif.Image.DateTimeOriginal' not in self.clean)
    key = 'Exif.Image.DateTimeOriginal'
    # The original used zero-padded literals (datetime.datetime(2007,03,11)),
    # which are octal syntax in Python 2 (03 == 3) and invalid in Python 3.
    # Plain decimal literals have the exact same values.
    correct_date = datetime.datetime(2007, 3, 11)
    incorrect_date = datetime.datetime(2009, 3, 25)
    tag_date = ExifTag(key, correct_date)
    false_tag_date = ExifTag(key, incorrect_date)
    self.clean[key] = tag_date
    self.assertEqual(len(self.clean), 1)
    self.assertTrue('Exif.Image.DateTimeOriginal' in self.clean)
    # get() must return the stored tag, or the supplied default when absent.
    self.assertEqual(self.clean.get('Exif.Image.DateTimeOriginal', false_tag_date), tag_date)
    self.assertEqual(self.clean.get('Exif.Image.DateTime', tag_date), tag_date)
    key = 'Exif.Photo.UserComment'
    tag = ExifTag(key, 'UserComment')
    self.clean[key] = tag
    key = 'Iptc.Application2.Caption'
    tag = IptcTag(key, ['Caption'])
    self.clean[key] = tag
    key = 'Xmp.dc.subject'
    tag = XmpTag(key, ['subject', 'values'])
    self.clean[key] = tag
    self.assertTrue('Exif.Photo.UserComment' in self.clean)
    self.assertTrue('Iptc.Application2.Caption' in self.clean)
    self.assertTrue('Xmp.dc.subject' in self.clean)
    # clear() empties every family at once.
    self.clean.clear()
    self.assertEqual(len(self.clean), 0)
    self.assertTrue('Exif.Photo.UserComment' not in self.clean)
    self.assertTrue('Iptc.Application2.Caption' not in self.clean)
    self.assertTrue('Xmp.dc.subject' not in self.clean)
###########################
# Test the EXIF thumbnail #
###########################
def _test_thumbnail_tags(self, there):
    """Assert presence (there=True) or absence of the thumbnail EXIF tags."""
    keys = ('Exif.Thumbnail.Compression',
            'Exif.Thumbnail.JPEGInterchangeFormat',
            'Exif.Thumbnail.JPEGInterchangeFormatLength')
    for key in keys:
        self.assertEqual(key in self.metadata.exif_keys, there)

def test_no_exif_thumbnail(self):
    """Without a thumbnail, all thumbnail properties are empty."""
    self.metadata.read()
    thumb = self.metadata.exif_thumbnail
    self.assertEqual(thumb.mime_type, '')
    self.assertEqual(thumb.extension, '')
    self.assertEqual(thumb.data, '')
    self._test_thumbnail_tags(False)

def test_set_exif_thumbnail_from_data(self):
    """Assigning image data creates a JPEG thumbnail and its EXIF tags."""
    self.metadata.read()
    self._test_thumbnail_tags(False)
    thumb = self.metadata.exif_thumbnail
    thumb.data = EMPTY_JPG_DATA
    self.assertEqual(thumb.mime_type, 'image/jpeg')
    self.assertEqual(thumb.extension, '.jpg')
    self.assertEqual(thumb.data, EMPTY_JPG_DATA)
    self._test_thumbnail_tags(True)

def test_set_exif_thumbnail_from_file(self):
    """set_from_file() loads the thumbnail from a file on disk."""
    fd, pathname = tempfile.mkstemp(suffix='.jpg')
    os.write(fd, EMPTY_JPG_DATA)
    os.close(fd)
    self.metadata.read()
    self._test_thumbnail_tags(False)
    thumb = self.metadata.exif_thumbnail
    thumb.set_from_file(pathname)
    os.remove(pathname)
    self.assertEqual(thumb.mime_type, 'image/jpeg')
    self.assertEqual(thumb.extension, '.jpg')
    self.assertEqual(thumb.data, EMPTY_JPG_DATA)
    self._test_thumbnail_tags(True)

def test_write_exif_thumbnail_to_file(self):
    """write_to_file() writes the data, appending the format's extension."""
    self.metadata.read()
    self._test_thumbnail_tags(False)
    thumb = self.metadata.exif_thumbnail
    thumb.data = EMPTY_JPG_DATA
    fd, pathname = tempfile.mkstemp()
    os.close(fd)
    os.remove(pathname)
    thumb.write_to_file(pathname)
    # write_to_file() appends the extension to the requested pathname.
    pathname = pathname + thumb.extension
    # Use a context manager so the handle is closed even if the read fails.
    with open(pathname, 'rb') as written:
        self.assertEqual(written.read(), EMPTY_JPG_DATA)
    os.remove(pathname)

def test_erase_exif_thumbnail(self):
    """erase() removes the thumbnail data and its EXIF tags."""
    self.metadata.read()
    self._test_thumbnail_tags(False)
    thumb = self.metadata.exif_thumbnail
    thumb.data = EMPTY_JPG_DATA
    self.assertEqual(thumb.mime_type, 'image/jpeg')
    self.assertEqual(thumb.extension, '.jpg')
    self.assertEqual(thumb.data, EMPTY_JPG_DATA)
    self._test_thumbnail_tags(True)
    thumb.erase()
    self.assertEqual(thumb.mime_type, '')
    self.assertEqual(thumb.extension, '')
    self.assertEqual(thumb.data, '')
    self._test_thumbnail_tags(False)

def test_set_exif_thumbnail_from_invalid_data(self):
    # No check on the format of the buffer is performed, therefore it will
    # always work.
    self.metadata.read()
    self._test_thumbnail_tags(False)
    thumb = self.metadata.exif_thumbnail
    thumb.data = 'invalid'
    self.assertEqual(thumb.mime_type, 'image/jpeg')
    self._test_thumbnail_tags(True)

def test_set_exif_thumbnail_from_inexistent_file(self):
    """set_from_file() raises IOError for a missing file, leaving no tags."""
    self.metadata.read()
    self._test_thumbnail_tags(False)
    thumb = self.metadata.exif_thumbnail
    fd, pathname = tempfile.mkstemp()
    os.close(fd)
    os.remove(pathname)
    self.assertRaises(IOError, thumb.set_from_file, pathname)
    self._test_thumbnail_tags(False)

def test_exif_thumbnail_is_preview(self):
    """The EXIF thumbnail is also exposed through the previews API."""
    self.metadata.read()
    self._test_thumbnail_tags(False)
    self.assertEqual(len(self.metadata.previews), 0)
    thumb = self.metadata.exif_thumbnail
    thumb.data = EMPTY_JPG_DATA
    self._test_thumbnail_tags(True)
    self.assertEqual(len(self.metadata.previews), 1)
    preview = self.metadata.previews[0]
    self.assertEqual(thumb.mime_type, preview.mime_type)
    self.assertEqual(thumb.extension, preview.extension)
    self.assertEqual(thumb.data, preview.data)
#########################
# Test the IPTC charset #
#########################
def test_guess_iptc_charset(self):
    # If no charset is defined, exiv2 guesses it from the encoding of the
    # metadata.
    self.metadata.read()
    self.assertEqual(self.metadata.iptc_charset, 'ascii')
    self.metadata['Iptc.Application2.City'] = [u'Córdoba']
    self.assertEqual(self.metadata.iptc_charset, 'utf-8')

def test_set_iptc_charset_utf8(self):
    """Every UTF-8 alias is accepted, case-insensitively, and normalised."""
    self.metadata.read()
    self.assertTrue('Iptc.Envelope.CharacterSet' not in self.metadata.iptc_keys)
    self.assertEqual(self.metadata.iptc_charset, 'ascii')
    values = ('utf-8', 'utf8', 'u8', 'utf', 'utf_8')
    for value in values:
        self.metadata.iptc_charset = value
        self.assertEqual(self.metadata.iptc_charset, 'utf-8')
        self.metadata.iptc_charset = value.upper()
        self.assertEqual(self.metadata.iptc_charset, 'utf-8')

def test_set_invalid_iptc_charset(self):
    """Unknown charset names are rejected with ValueError."""
    self.metadata.read()
    self.assertTrue('Iptc.Envelope.CharacterSet' not in self.metadata.iptc_keys)
    values = ('invalid', 'utf-9', '3.14')
    for value in values:
        self.assertRaises(ValueError, self.metadata.__setattr__,
                          'iptc_charset', value)

def test_set_unhandled_iptc_charset(self):
    # At the moment, the only charset handled is UTF-8.
    self.metadata.read()
    self.assertTrue('Iptc.Envelope.CharacterSet' not in self.metadata.iptc_keys)
    values = ('ascii', 'iso8859_15', 'shift_jis')
    for value in values:
        self.assertRaises(ValueError, self.metadata.__setattr__,
                          'iptc_charset', value)

def test_delete_iptc_charset(self):
    """Deleting (or None-assigning) the charset reverts it to ascii."""
    self.metadata.read()
    key = 'Iptc.Envelope.CharacterSet'
    # Deleting while unset is a harmless no-op.
    self.assertEqual(self.metadata.iptc_charset, 'ascii')
    self.assertTrue(key not in self.metadata.iptc_keys)
    del self.metadata.iptc_charset
    self.assertEqual(self.metadata.iptc_charset, 'ascii')
    self.assertTrue(key not in self.metadata.iptc_keys)
    # Set, then delete via the del statement.
    self.metadata.iptc_charset = 'utf-8'
    self.assertEqual(self.metadata.iptc_charset, 'utf-8')
    self.assertTrue(key in self.metadata.iptc_keys)
    del self.metadata.iptc_charset
    self.assertEqual(self.metadata.iptc_charset, 'ascii')
    self.assertTrue(key not in self.metadata.iptc_keys)
    # Set, then delete by assigning None.
    self.metadata.iptc_charset = 'utf-8'
    self.assertEqual(self.metadata.iptc_charset, 'utf-8')
    self.assertTrue(key in self.metadata.iptc_keys)
    self.metadata.iptc_charset = None
    self.assertEqual(self.metadata.iptc_charset, 'ascii')
    self.assertTrue(key not in self.metadata.iptc_keys)
---
title: Navigation Blocking
---
# Navigation Blocking
[MODES: framework, data]
<br/>
<br/>
When users are in the middle of a workflow, like filling out an important form, you may want to prevent them from navigating away from the page.
This example will show:
- Setting up a route with a form and action called with a fetcher
- Blocking navigation when the form is dirty
- Showing a confirmation when the user tries to leave the page
## 1. Set up a route with a form
Add a route with the form, we'll use a "contact" route for this example:
```ts filename=routes.ts
import {
type RouteConfig,
index,
route,
} from "@react-router/dev/routes";
export default [
index("routes/home.tsx"),
route("contact", "routes/contact.tsx"),
] satisfies RouteConfig;
```
Add the form to the contact route module:
```tsx filename=routes/contact.tsx
import { useFetcher } from "react-router";
import type { Route } from "./+types/contact";
export async function action({
request,
}: Route.ActionArgs) {
let formData = await request.formData();
let email = formData.get("email");
let message = formData.get("message");
console.log(email, message);
return { ok: true };
}
export default function Contact() {
let fetcher = useFetcher();
return (
<fetcher.Form method="post">
<p>
<label>
Email: <input name="email" type="email" />
</label>
</p>
<p>
<textarea name="message" />
</p>
<p>
<button type="submit">
{fetcher.state === "idle" ? "Send" : "Sending..."}
</button>
</p>
</fetcher.Form>
);
}
```
## 2. Add dirty state and onChange handler
To track the dirty state of the form, we'll use a single boolean and a simple onChange handler on the form. You may want to track the dirty state differently, but this approach works for this guide.
```tsx filename=routes/contact.tsx lines=[2,8-12]
export default function Contact() {
let [isDirty, setIsDirty] = useState(false);
let fetcher = useFetcher();
return (
<fetcher.Form
method="post"
onChange={(event) => {
let email = event.currentTarget.email.value;
let message = event.currentTarget.message.value;
setIsDirty(Boolean(email || message));
}}
>
{/* existing code */}
</fetcher.Form>
);
}
```
## 3. Block navigation when the form is dirty
```tsx filename=routes/contact.tsx lines=[1,6-8]
import { useBlocker } from "react-router";
export default function Contact() {
let [isDirty, setIsDirty] = useState(false);
let fetcher = useFetcher();
let blocker = useBlocker(
useCallback(() => isDirty, [isDirty]),
);
// ... existing code
}
```
While this will now block a navigation, there's no way for the user to confirm it.
## 4. Show confirmation UI
This uses a simple div, but you may want to use a modal dialog.
```tsx filename=routes/contact.tsx lines=[19-41]
export default function Contact() {
let [isDirty, setIsDirty] = useState(false);
let fetcher = useFetcher();
let blocker = useBlocker(
useCallback(() => isDirty, [isDirty]),
);
return (
<fetcher.Form
method="post"
onChange={(event) => {
let email = event.currentTarget.email.value;
let message = event.currentTarget.message.value;
setIsDirty(Boolean(email || message));
}}
>
{/* existing code */}
{blocker.state === "blocked" && (
<div>
<p>Wait! You didn't send the message yet:</p>
<p>
<button
type="button"
onClick={() => blocker.proceed()}
>
Leave
</button>{" "}
<button
type="button"
onClick={() => blocker.reset()}
>
Stay here
</button>
</p>
</div>
)}
</fetcher.Form>
);
}
```
If the user clicks "leave" then `blocker.proceed()` will proceed with the navigation. If they click "stay here" then `blocker.reset()` will clear the blocker and keep them on the current page.
## 5. Reset the blocker when the action resolves
If the user doesn't click either "leave" or "stay here" and instead submits the form, the blocker will still be active. Let's reset the blocker when the action resolves, using an effect.
```tsx filename=routes/contact.tsx
useEffect(() => {
if (fetcher.data?.ok) {
if (blocker.state === "blocked") {
blocker.reset();
}
}
}, [fetcher.data]);
```
## 6. Clear the form when the action resolves
While unrelated to navigation blocking, let's clear the form when the action resolves with a ref.
```tsx
let formRef = useRef<HTMLFormElement>(null);
// put it on the form
<fetcher.Form
ref={formRef}
method="post"
onChange={(event) => {
// ... existing code
}}
>
{/* existing code */}
</fetcher.Form>;
```
```tsx
useEffect(() => {
if (fetcher.data?.ok) {
// clear the form in the effect
formRef.current?.reset();
if (blocker.state === "blocked") {
blocker.reset();
}
}
}, [fetcher.data]);
```
Alternatively, if a navigation is currently blocked, instead of resetting the blocker, you can proceed through to the blocked navigation.
```tsx
useEffect(() => {
if (fetcher.data?.ok) {
if (blocker.state === "blocked") {
// proceed with the blocked navigation
blocker.proceed();
} else {
formRef.current?.reset();
}
}
}, [fetcher.data]);
```
In this case the user flow is:
- User fills out the form
- User forgets to click "send" and clicks a link instead
- The navigation is blocked, and the confirmation message is shown
- Instead of clicking "leave" or "stay here", the user submits the form
- The user is taken to the requested page | unknown | github | https://github.com/remix-run/react-router | docs/how-to/navigation-blocking.md |
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# Virtual Me2Me implementation. This script runs and manages the processes
# required for a Virtual Me2Me desktop, which are: X server, X desktop
# session, and Host process.
# This script is intended to run continuously as a background daemon
# process, running under an ordinary (non-root) user account.
import atexit
import base64
import errno
import getpass
import hashlib
import hmac
import json
import logging
import optparse
import os
import random
import signal
import socket
import subprocess
import sys
import tempfile
import time
import urllib2
import uuid
# Local modules
import gaia_auth
import keygen
# Name of the host binary to search for in the build-output directories.
REMOTING_COMMAND = "remoting_me2me_host"

# Command-line switch for passing the config path to remoting_me2me_host.
HOST_CONFIG_SWITCH_NAME = "host-config"

# Needs to be an absolute path, since the current working directory is changed
# when this process self-daemonizes.
SCRIPT_PATH = os.path.dirname(sys.argv[0])
if SCRIPT_PATH:
  SCRIPT_PATH = os.path.abspath(SCRIPT_PATH)
else:
  # sys.argv[0] had no directory component; fall back to the cwd.
  SCRIPT_PATH = os.getcwd()

# These are relative to SCRIPT_PATH.
EXE_PATHS_TO_TRY = [
    ".",
    "../../out/Debug",
    "../../out/Release"
]

# Where the per-host JSON config and PID files are stored.
CONFIG_DIR = os.path.expanduser("~/.config/chrome-remote-desktop")
HOME_DIR = os.environ["HOME"]

# Standard X server lock-file path, used to find an unused display number.
X_LOCK_FILE_TEMPLATE = "/tmp/.X%d-lock"
FIRST_X_DISPLAY_NUMBER = 20

# Xauthority file shared with the spawned Xvfb server and its clients.
X_AUTH_FILE = os.path.expanduser("~/.Xauthority")
os.environ["XAUTHORITY"] = X_AUTH_FILE

# Globals needed by the atexit cleanup() handler.
g_desktops = []
g_pidfile = None
class Authentication:
  """Manage authentication tokens for Chromoting/xmpp.

  Tokens are obtained interactively from the user's Google credentials and
  persisted as JSON in |config_file|.
  """

  def __init__(self, config_file):
    # Path of the JSON file holding the login name and auth tokens.
    self.config_file = config_file

  def generate_tokens(self):
    """Prompt for username/password and use them to generate new authentication
    tokens.

    Raises:
      Exception: Failed to get new authentication tokens.
    """
    # sys.stdout.write avoids the Python-2-only 'print x,' statement form.
    sys.stdout.write("Email: ")
    self.login = raw_input()
    password = getpass.getpass("Password: ")
    chromoting_auth = gaia_auth.GaiaAuthenticator('chromoting')
    self.chromoting_auth_token = chromoting_auth.authenticate(self.login,
                                                              password)
    xmpp_authenticator = gaia_auth.GaiaAuthenticator('chromiumsync')
    self.xmpp_auth_token = xmpp_authenticator.authenticate(self.login,
                                                           password)

  def load_config(self):
    """Load previously saved tokens from |config_file|.

    Returns:
      True if the config was loaded successfully, False otherwise.
    """
    try:
      with open(self.config_file, 'r') as settings_file:
        data = json.load(settings_file)
      self.login = data["xmpp_login"]
      self.chromoting_auth_token = data["chromoting_auth_token"]
      self.xmpp_auth_token = data["xmpp_auth_token"]
    except (IOError, ValueError, KeyError):
      # Missing file, malformed JSON or missing fields all mean there is no
      # usable saved configuration.  (The original bare 'except:' also hid
      # unrelated errors such as KeyboardInterrupt.)
      return False
    return True

  def save_config(self):
    """Write the tokens to |config_file| as JSON, readable only by the user."""
    data = {
        "xmpp_login": self.login,
        "chromoting_auth_token": self.chromoting_auth_token,
        "xmpp_auth_token": self.xmpp_auth_token,
    }
    # File will contain private keys, so deny read/write access to others.
    old_umask = os.umask(0o066)
    try:
      with open(self.config_file, 'w') as settings_file:
        settings_file.write(json.dumps(data, indent=2))
    finally:
      # Always restore the process umask, even if the write fails.
      os.umask(old_umask)
class Host:
  """This manages the configuration for a host.

  Callers should instantiate a Host object (passing in a filename where the
  config will be kept), then should call either of the methods:

  * register(auth): Create a new Host configuration and register it
  with the Directory Service (the "auth" parameter is used to
  authenticate with the Service).
  * load_config(): Load a config from disk, with details of an existing Host
  registration.

  After calling register() (or making any config changes) the method
  save_config() should be called to save the details to disk.
  """

  server = 'www.googleapis.com'
  url = 'https://' + server + '/chromoting/v1/@me/hosts'

  def __init__(self, config_file):
    self.config_file = config_file
    # A fresh host identity; load_config() overwrites these with saved values.
    self.host_id = str(uuid.uuid1())
    self.host_name = socket.gethostname()
    self.host_secret_hash = None
    self.private_key = None

  def register(self, auth):
    """Generates a private key for the stored |host_id|, and registers it with
    the Directory service.

    Args:
      auth: Authentication object with credentials for authenticating with the
        Directory service.

    Raises:
      urllib2.HTTPError: An error occurred talking to the Directory server
        (for example, if the |auth| credentials were rejected).
    """
    logging.info("HostId: " + self.host_id)
    logging.info("HostName: " + self.host_name)
    logging.info("Generating RSA key pair...")
    (self.private_key, public_key) = keygen.generateRSAKeyPair()
    logging.info("Done")
    json_data = {
        "data": {
            "hostId": self.host_id,
            "hostName": self.host_name,
            "publicKey": public_key,
        }
    }
    params = json.dumps(json_data)
    headers = {
        "Authorization": "GoogleLogin auth=" + auth.chromoting_auth_token,
        "Content-Type": "application/json",
    }
    request = urllib2.Request(self.url, params, headers)
    logging.info("Registering host with directory service...")
    # Note: the original built an OpenerDirector here but never used it;
    # urlopen() already raises HTTPError on server errors.
    res = urllib2.urlopen(request)
    res.read()  # Drain the response body; its content is not needed.
    logging.info("Done")

  def ask_pin(self):
    """Interactively prompt for (and confirm) a host PIN, then store it."""
    print(
"""Chromoting host supports PIN-based authentication, but it doesn't
work with Chrome 16 and Chrome 17 clients. Leave the PIN empty if you
need to use Chrome 16 or Chrome 17 clients. If you only use Chrome 18
or above, please set a non-empty PIN. You can change PIN later using
-p flag.""")
    while True:
      pin = getpass.getpass("Host PIN: ")
      if len(pin) == 0:
        print("Using empty PIN")
        break
      if len(pin) < 4:
        print("PIN must be at least 4 characters long.")
        continue
      pin2 = getpass.getpass("Confirm host PIN: ")
      if pin2 != pin:
        print("PINs didn't match. Please try again.")
        continue
      break
    self.set_pin(pin)

  def set_pin(self, pin):
    """Store |pin| as the host secret hash; an empty PIN disables hashing."""
    if pin == "":
      self.host_secret_hash = "plain:"
    else:
      # HMAC-SHA256 keyed with the host ID, base64-encoded.
      self.host_secret_hash = "hmac:" + base64.b64encode(
          hmac.new(str(self.host_id), pin, hashlib.sha256).digest())

  def is_pin_set(self):
    # Truthy when a secret hash has been configured (None until then).
    return self.host_secret_hash

  def load_config(self):
    """Load a saved host configuration from |config_file|.

    Returns:
      True on success, False if the file is missing or not valid JSON.
    """
    try:
      with open(self.config_file, 'r') as settings_file:
        data = json.load(settings_file)
    except (IOError, ValueError):
      # Narrowed from a bare 'except:'; a missing required field below still
      # raises KeyError, exactly as before.
      logging.info("Failed to load: " + self.config_file)
      return False
    self.host_id = data["host_id"]
    self.host_name = data["host_name"]
    self.host_secret_hash = data.get("host_secret_hash")
    self.private_key = data["private_key"]
    return True

  def save_config(self):
    """Write the host configuration to disk, readable only by the user."""
    data = {
        "host_id": self.host_id,
        "host_name": self.host_name,
        "host_secret_hash": self.host_secret_hash,
        "private_key": self.private_key,
    }
    # (The original conditionally re-assigned host_secret_hash here, which
    # was redundant -- the dict above already contains it.)
    # File contains the private key, so deny read/write access to others.
    old_umask = os.umask(0o066)
    try:
      with open(self.config_file, 'w') as settings_file:
        settings_file.write(json.dumps(data, indent=2))
    finally:
      os.umask(old_umask)
class Desktop:
  """Manage a single virtual desktop: an Xvfb server, the X session running
  on it, and the remoting host process attached to it."""

  def __init__(self, width, height):
    self.x_proc = None
    self.session_proc = None
    self.host_proc = None
    self.width = width
    self.height = height
    # Register with the global list so cleanup() can terminate our children.
    g_desktops.append(self)

  @staticmethod
  def get_unused_display_number():
    """Return a candidate display number for which there is currently no
    X Server lock file"""
    display = FIRST_X_DISPLAY_NUMBER
    while os.path.exists(X_LOCK_FILE_TEMPLATE % display):
      display += 1
    return display

  def launch_x_server(self, extra_x_args):
    """Start Xvfb on a free display and wait until it accepts connections.

    Args:
      extra_x_args: list of extra command-line arguments passed to Xvfb.

    Raises:
      Exception: xauth failed, Xvfb could not be spawned, or the server
        never became reachable.
    """
    display = self.get_unused_display_number()
    # Generate an auth cookie for the new display (display is an int, so the
    # shell interpolation is safe).
    ret_code = subprocess.call("xauth add :%d . `mcookie`" % display,
                               shell=True)
    if ret_code != 0:
      raise Exception("xauth failed with code %d" % ret_code)
    logging.info("Starting Xvfb on display :%d" % display)
    screen_option = "%dx%dx24" % (self.width, self.height)
    self.x_proc = subprocess.Popen(["Xvfb", ":%d" % display,
                                    "-auth", X_AUTH_FILE,
                                    "-nolisten", "tcp",
                                    "-screen", "0", screen_option
                                    ] + extra_x_args)
    if not self.x_proc.pid:
      raise Exception("Could not start Xvfb.")

    # Create clean environment for new session, so it is cleanly separated
    # from the user's console X session.
    self.child_env = {
        "DISPLAY": ":%d" % display,
        "REMOTING_ME2ME_SESSION": "1" }
    for key in [
        "HOME",
        "LANG",
        "LOGNAME",
        "PATH",
        "SHELL",
        "USER",
        "USERNAME"]:
      # 'in' replaces the deprecated dict.has_key() (removed in Python 3).
      if key in os.environ:
        self.child_env[key] = os.environ[key]

    # Wait for X to be active: poll with xdpyinfo up to 5 times.
    for test in range(5):
      proc = subprocess.Popen("xdpyinfo > /dev/null", env=self.child_env,
                              shell=True)
      pid, retcode = os.waitpid(proc.pid, 0)
      if retcode == 0:
        break
      time.sleep(0.5)
    if retcode != 0:
      raise Exception("Could not connect to Xvfb.")
    else:
      logging.info("Xvfb is active.")

  def launch_x_session(self):
    # Start desktop session
    # The /dev/null input redirection is necessary to prevent Xsession from
    # reading from stdin.  If this code runs as a shell background job in a
    # terminal, any reading from stdin causes the job to be suspended.
    # Daemonization would solve this problem by separating the process from
    # the controlling terminal.
    #
    # This assumes that GDM is installed and configured on the system.
    self.session_proc = subprocess.Popen("/etc/gdm/Xsession",
                                         stdin=open(os.devnull, "r"),
                                         cwd=HOME_DIR,
                                         env=self.child_env)
    if not self.session_proc.pid:
      raise Exception("Could not start X session")

  def launch_host(self, host):
    # Start the remoting host, pointing it at |host|'s config file.
    args = [locate_executable(REMOTING_COMMAND),
            "--%s=%s" % (HOST_CONFIG_SWITCH_NAME, host.config_file)]
    self.host_proc = subprocess.Popen(args, env=self.child_env)
    if not self.host_proc.pid:
      raise Exception("Could not start remoting host")
class PidFile:
  """Class to allow creating and deleting a file which holds the PID of the
  running process. This is used to detect if a process is already running, and
  inform the user of the PID. On process termination, the PID file is
  deleted.

  Note that PID files are not truly atomic or reliable, see
  http://mywiki.wooledge.org/ProcessManagement for more discussion on this.

  So this class is just to prevent the user from accidentally running two
  instances of this script, and to report which PID may be the other running
  instance.
  """

  def __init__(self, filename):
    """Create an object to manage a PID file. This does not create the PID
    file itself."""
    self.filename = filename
    self.created = False

  def check(self):
    """Checks current status of the process.

    Returns:
      Tuple (running, pid):
      |running| is True if the daemon is running.
      |pid| holds the process ID of the running instance if |running| is True.
      If the PID file exists but the PID couldn't be read from the file
      (perhaps if the data hasn't been written yet), 0 is returned.

    Raises:
      IOError: Filesystem error occurred.
    """
    if os.path.exists(self.filename):
      # 'with' guarantees the handle is closed even if read() raises.
      with open(self.filename, 'r') as pid_file:
        file_contents = pid_file.read()
      try:
        pid = int(file_contents)
      except ValueError:
        # File exists but the PID hasn't been written (yet): assume running.
        return True, 0
      # Test to see if there's a process currently running with that PID.
      # If there is no process running, the existing PID file is definitely
      # stale and it is safe to overwrite it. Otherwise, report the PID as
      # possibly a running instance of this script.
      if os.path.exists("/proc/%d" % pid):
        return True, pid
    # No PID file, or a stale file whose process has exited.
    return False, 0

  def create(self):
    """Creates an empty PID file."""
    with open(self.filename, 'w'):
      pass
    self.created = True

  def write_pid(self):
    """Write the current process's PID to the PID file.

    This is done separately from create() as this needs to be called
    after any daemonization, when the correct PID becomes known. But
    check() and create() has to happen before daemonization, so that
    if another instance is already running, this fact can be reported
    to the user's terminal session. This also avoids corrupting the
    log file of the other process, since daemonize() would create a
    new log file.
    """
    with open(self.filename, 'w') as pid_file:
      pid_file.write('%d\n' % os.getpid())
    self.created = True

  def delete_file(self):
    """Delete the PID file if it was created by this instance.

    This is called on process termination.
    """
    if self.created:
      os.remove(self.filename)
def locate_executable(exe_name):
  """Search the known build-output directories for |exe_name| and return the
  first existing path; raise if it is found nowhere."""
  candidates = (os.path.join(SCRIPT_PATH, rel_dir, exe_name)
                for rel_dir in EXE_PATHS_TO_TRY)
  for exe_path in candidates:
    if os.path.exists(exe_path):
      return exe_path
  raise Exception("Could not locate executable '%s'" % exe_name)
def daemonize(log_filename):
  """Background this process and detach from controlling terminal, redirecting
  stdout/stderr to |log_filename|."""

  # TODO(lambroslambrou): Having stdout/stderr redirected to a log file is not
  # ideal - it could create a filesystem DoS if the daemon or a child process
  # were to write excessive amounts to stdout/stderr.  Ideally, stdout/stderr
  # should be redirected to a pipe or socket, and a process at the other end
  # should consume the data and write it to a logging facility which can do
  # data-capping or log-rotation. The 'logger' command-line utility could be
  # used for this, but it might cause too much syslog spam.

  # Create new (temporary) file-descriptors before forking, so any errors get
  # reported to the main process and set the correct exit-code.
  # The mode is provided, since Python otherwise sets a default mode of 0777,
  # which would result in the new file having permissions of 0777 & ~umask,
  # possibly leaving the executable bits set.
  devnull_fd = os.open(os.devnull, os.O_RDONLY)
  log_fd = os.open(log_filename, os.O_WRONLY | os.O_CREAT | os.O_TRUNC, 0600)

  # Classic UNIX double-fork: the first fork lets the parent exit so the
  # child can become a session leader via setsid().
  pid = os.fork()

  if pid == 0:
    # Child process
    os.setsid()

    # The second fork ensures that the daemon isn't a session leader, so that
    # it doesn't acquire a controlling terminal.
    pid = os.fork()

    if pid == 0:
      # Grandchild process: this is the process that survives as the daemon.
      pass
    else:
      # Child process
      os._exit(0)
  else:
    # Parent process
    os._exit(0)

  # Only the grandchild (the daemon) reaches this point.
  logging.info("Daemon process running, logging to '%s'" % log_filename)

  os.chdir(HOME_DIR)

  # Copy the file-descriptors to create new stdin, stdout and stderr.  Note
  # that dup2(oldfd, newfd) closes newfd first, so this will close the current
  # stdin, stdout and stderr, detaching from the terminal.
  os.dup2(devnull_fd, sys.stdin.fileno())
  os.dup2(log_fd, sys.stdout.fileno())
  os.dup2(log_fd, sys.stderr.fileno())

  # Close the temporary file-descriptors.
  os.close(devnull_fd)
  os.close(log_fd)
def cleanup():
  """atexit handler: remove the PID file and terminate any Xvfb children."""
  logging.info("Cleanup.")

  if g_pidfile:
    try:
      g_pidfile.delete_file()
    except Exception as e:
      # 'as' replaces the legacy 'except Exception, e' syntax (removed in
      # Python 3).  Never let cleanup errors mask the original exit reason.
      logging.error("Unexpected error deleting PID file: " + str(e))

  for desktop in g_desktops:
    if desktop.x_proc:
      logging.info("Terminating Xvfb")
      desktop.x_proc.terminate()
def reload_config():
  """Prod every running Host process into re-reading its config file."""
  for desktop in g_desktops:
    host_process = desktop.host_proc
    if not host_process:
      continue
    # Terminating the Host will cause the main loop to spawn another
    # instance, which will read any changes made to the Host config file.
    host_process.terminate()
def signal_handler(signum, stackframe):
  """Handle config-reload (SIGUSR1) and termination signals."""
  if signum != signal.SIGUSR1:
    # Exit cleanly so the atexit handler, cleanup(), gets called.
    raise SystemExit
  logging.info("SIGUSR1 caught, reloading configuration.")
  reload_config()
def main():
  """Entry point: parse options, register/configure the host, then supervise.

  Handles one-shot modes first (--check-running, --stop, --new-pin), then
  registers the host with the Directory if needed, optionally daemonizes,
  and finally runs a supervision loop that (re)launches the X server, the
  X session and the host process whenever one of them dies.

  Returns a process exit code: 0 on success/clean stop, 1 on error.
  """
  parser = optparse.OptionParser(
      "Usage: %prog [options] [ -- [ X server options ] ]")
  parser.add_option("-s", "--size", dest="size", default="1280x1024",
                    help="dimensions of virtual desktop (default: %default)")
  parser.add_option("-f", "--foreground", dest="foreground", default=False,
                    action="store_true",
                    help="don't run as a background daemon")
  parser.add_option("-k", "--stop", dest="stop", default=False,
                    action="store_true",
                    help="stop the daemon currently running")
  parser.add_option("-p", "--new-pin", dest="new_pin", default=False,
                    action="store_true",
                    help="set new PIN before starting the host")
  parser.add_option("", "--check-running", dest="check_running", default=False,
                    action="store_true",
                    help="return 0 if the daemon is running, or 1 otherwise")
  parser.add_option("", "--explicit-config", dest="explicit_config",
                    help="explicitly specify content of the config")
  (options, args) = parser.parse_args()

  # Desktop size is given as "WIDTHxHEIGHT"; validated further below.
  size_components = options.size.split("x")
  if len(size_components) != 2:
    parser.error("Incorrect size format, should be WIDTHxHEIGHT");

  # Per-host config/PID file names are keyed on a hash of the hostname.
  host_hash = hashlib.md5(socket.gethostname()).hexdigest()
  pid_filename = os.path.join(CONFIG_DIR, "host#%s.pid" % host_hash)

  # One-shot mode: report whether the daemon is running.
  if options.check_running:
    running, pid = PidFile(pid_filename).check()
    return 0 if (running and pid != 0) else 1

  # One-shot mode: stop a running daemon via SIGTERM.
  if options.stop:
    running, pid = PidFile(pid_filename).check()
    if not running:
      print "The daemon currently is not running"
    else:
      print "Killing process %s" % pid
      os.kill(pid, signal.SIGTERM)
    return 0

  try:
    width = int(size_components[0])
    height = int(size_components[1])
    # Enforce minimum desktop size, as a sanity-check. The limit of 100 will
    # detect typos of 2 instead of 3 digits.
    if width < 100 or height < 100:
      raise ValueError
  except ValueError:
    parser.error("Width and height should be 100 pixels or greater")

  atexit.register(cleanup)

  for s in [signal.SIGHUP, signal.SIGINT, signal.SIGTERM, signal.SIGUSR1]:
    signal.signal(s, signal_handler)

  # Ensure full path to config directory exists.
  if not os.path.exists(CONFIG_DIR):
    os.makedirs(CONFIG_DIR, mode=0700)

  # With --explicit-config, both config files are overwritten with the
  # supplied content instead of being generated interactively.
  if options.explicit_config:
    for file_name in ["auth.json", "host#%s.json" % host_hash]:
      settings_file = open(os.path.join(CONFIG_DIR, file_name), 'w')
      settings_file.write(options.explicit_config)
      settings_file.close()

  auth = Authentication(os.path.join(CONFIG_DIR, "auth.json"))
  need_auth_tokens = not auth.load_config()

  host = Host(os.path.join(CONFIG_DIR, "host#%s.json" % host_hash))
  register_host = not host.load_config()

  # Outside the loop so user doesn't get asked twice.
  if register_host:
    host.ask_pin()
  elif options.new_pin or not host.is_pin_set():
    host.ask_pin()
    host.save_config()
    # If a daemon is already running, poke it with SIGUSR1 so it reloads the
    # config (and hence the new PIN) and then exit.
    running, pid = PidFile(pid_filename).check()
    if running and pid != 0:
      os.kill(pid, signal.SIGUSR1)
      print "The running instance has been updated with the new PIN."
      return 0

  if not options.explicit_config:
    # The loop is to deal with the case of registering a new Host with
    # previously-saved auth tokens (from a previous run of this script), which
    # may require re-prompting for username & password.
    while True:
      try:
        if need_auth_tokens:
          auth.generate_tokens()
          auth.save_config()
          need_auth_tokens = False
      except Exception:
        logging.error("Authentication failed")
        return 1

      try:
        if register_host:
          host.register(auth)
          host.save_config()
      except urllib2.HTTPError, err:
        if err.getcode() == 401:
          # Authentication failed - re-prompt for username & password.
          need_auth_tokens = True
          continue
        else:
          # Not an authentication error.
          logging.error("Directory returned error: " + str(err))
          logging.error(err.read())
          return 1

      # |auth| and |host| are both set up, so break out of the loop.
      break

  global g_pidfile
  g_pidfile = PidFile(pid_filename)
  running, pid = g_pidfile.check()
  if running:
    print "An instance of this script is already running."
    print "Use the -k flag to terminate the running instance."
    print "If this isn't the case, delete '%s' and try again." % pid_filename
    return 1
  g_pidfile.create()

  # daemonize() must only be called after prompting for user/password, as the
  # process will become detached from the controlling terminal.
  if not options.foreground:
    log_file = tempfile.NamedTemporaryFile(prefix="me2me_host_", delete=False)
    daemonize(log_file.name)

  g_pidfile.write_pid()

  logging.info("Using host_id: " + host.host_id)

  desktop = Desktop(width, height)

  # Remember the time when the last session was launched, in order to enforce
  # a minimum time between launches. This avoids spinning in case of a
  # misconfigured system, or other error that prevents a session from starting
  # properly.
  last_launch_time = 0

  # Supervision loop: restart whichever of X server / X session / host
  # process has died, then block in os.wait() until the next child exits.
  while True:
    # If the session process stops running (e.g. because the user logged out),
    # the X server should be reset and the session restarted, to provide a
    # completely clean new session.
    if desktop.session_proc is None and desktop.x_proc is not None:
      logging.info("Terminating X server")
      desktop.x_proc.terminate()

    if desktop.x_proc is None:
      if desktop.session_proc is not None:
        # The X session would probably die soon if the X server is not
        # running (because of the loss of the X connection). Terminate it
        # anyway, to be sure.
        logging.info("Terminating X session")
        desktop.session_proc.terminate()
      else:
        # Neither X server nor X session are running.
        elapsed = time.time() - last_launch_time
        if elapsed < 60:
          logging.error("The session lasted less than 1 minute. Waiting " +
                        "before starting new session.")
          time.sleep(60 - elapsed)
        logging.info("Launching X server and X session")
        last_launch_time = time.time()
        desktop.launch_x_server(args)
        desktop.launch_x_session()

    if desktop.host_proc is None:
      logging.info("Launching host process")
      desktop.launch_host(host)

    try:
      pid, status = os.wait()
    except OSError, e:
      if e.errno == errno.EINTR:
        # Retry on EINTR, which can happen if a signal such as SIGUSR1 is
        # received.
        continue
      else:
        # Anything else is an unexpected error.
        raise

    logging.info("wait() returned (%s,%s)" % (pid, status))

    # When os.wait() notifies that a process has terminated, any Popen instance
    # for that process is no longer valid. Reset any affected instance to
    # None.
    if desktop.x_proc is not None and pid == desktop.x_proc.pid:
      logging.info("X server process terminated")
      desktop.x_proc = None

    if desktop.session_proc is not None and pid == desktop.session_proc.pid:
      logging.info("Session process terminated")
      desktop.session_proc = None

    if desktop.host_proc is not None and pid == desktop.host_proc.pid:
      logging.info("Host process terminated")
      desktop.host_proc = None

      # These exit-codes must match the ones used by the host.
      # See remoting/host/constants.h.
      # Delete the host or auth configuration depending on the returned error
      # code, so the next time this script is run, a new configuration
      # will be created and registered.
      if os.WEXITSTATUS(status) == 2:
        logging.info("Host configuration is invalid - exiting.")
        os.remove(auth.config_file)
        os.remove(host.config_file)
        return 0
      elif os.WEXITSTATUS(status) == 3:
        logging.info("Host ID has been deleted - exiting.")
        os.remove(host.config_file)
        return 0
      elif os.WEXITSTATUS(status) == 4:
        logging.info("OAuth credentials are invalid - exiting.")
        os.remove(auth.config_file)
        return 0
if __name__ == "__main__":
  # Log everything (DEBUG and up). When running as a daemon, daemonize()
  # redirects stdout/stderr into the log file.
  logging.basicConfig(level=logging.DEBUG)
  sys.exit(main())
__author__ = 'Thorsten Sick'
import svgwrite
class Card():
    """A punch card (e.g. for a music box), rendered as an SVG drawing."""

    def __init__(self, offset=(0, 0), size=(105, 148), lines=8,
                 punchbox=((0, 75), (100, 140))):
        """A Punchcard.

        @param offset: Offset on sheet, in mm. That way several cards can be
            printed on one sheet; maybe later laser cut them.
        @param size: Size of the card itself, in mm.
        @param lines: Number of lines of holes. Each one will be 8 holes
            wide. That is 8 bytes total for 8 lines.
        @param punchbox: Coordinates ((x1, y1), (x2, y2)) of the box that
            contains the punch holes, in mm.
        """
        self.size = size
        self.offset = offset
        self.lines = lines
        self.punchbox = punchbox
        # Created by generate(); the drawing helpers require it to be set.
        self.svg_document = None

    def draw_hole(self, center=(0, 0), radius="2mm"):
        """Draw a single punch hole.

        @param center: Center position (x, y) in mm.
        @param radius: Radius of the hole, as an SVG length string.
        """
        px1 = str(center[0]) + "mm"
        py1 = str(center[1]) + "mm"
        self.svg_document.add(self.svg_document.circle(center=(px1, py1),
                                                       r=radius,
                                                       stroke_width="1",
                                                       stroke="black",
                                                       # fill = "rgb(255,255,0)"
                                                       ))

    def draw_punch_line(self, number, holes=0, radius="2mm"):
        """Draw one line of up to 8 holes encoding a single byte.

        @param number: Number of the line (0 .. self.lines - 1).
        @param holes: Byte value (0-255) whose set bits become holes,
            most-significant bit first. Out-of-range values are ignored.
        @param radius: Radius of each hole.
        """
        if not 0 <= holes <= 255:
            # Out-of-range values are silently ignored (previous behaviour).
            return
        # Zero-padded 8-bit binary string, MSB first.
        hstr = format(holes, "08b")
        print(hstr)  # debug output
        vsize = self.punchbox[1][1] - self.punchbox[0][1]
        hsize = self.punchbox[1][0] - self.punchbox[0][0]
        # Divide the punchbox into a 9x9 grid so the 8 hole positions are
        # evenly spaced with a margin on every side.
        vdist = vsize / 9
        hdist = hsize / 9
        y = self.punchbox[0][1] + self.offset[1] + vdist * (number + 1)
        for i, bit in enumerate(hstr):
            if bit == "1":
                x = self.punchbox[0][0] + self.offset[0] + hdist * (i + 1)
                self.draw_hole((x, y), radius)

    def print_text(self, text, pos, pixel="12px", font="Arial"):
        """Add a text element at pos (a pair of SVG length strings)."""
        self.svg_document.add(self.svg_document.text(text,
                              insert=(pos[0], pos[1]),
                              style="font-size:%s; font-family:%s" % (pixel, font)))

    def print_heading(self, text, pixel="20px", font="1942 report"):
        """Print Heading at the top-left corner of the card."""
        self.print_text(text, ("5mm", "10mm"), pixel, font)

    def print_playlist(self, text, pixel="12px", font="1942 report"):
        """A list of strings printed at the left side of the card.

        @param text: A list of strings. Each item is a line.
        """
        y = 15
        offset = 3  # line spacing in mm
        for line in text:
            self.print_text(line, ("10mm", str(y) + "mm"), pixel=pixel, font=font)
            y += offset

    def generate(self, card):
        """Render one card description and save it as an SVG file.

        @param card: dict with keys "filename", "genre", "artist", "album",
            "songs" (list of strings) and "ids" (list of byte values, one per
            punch line).
        """
        self.svg_document = svgwrite.Drawing(filename=card["filename"],
                                             size=(str(self.size[0]) + "mm",
                                                   str(self.size[1]) + "mm"))
        # NOTE: earlier revisions computed the punchbox rectangle's corner
        # coordinates here but never added a rect to the drawing; that dead
        # code has been removed. TODO: draw the punchbox outline if a visible
        # frame around the holes is wanted.
        for line, hole_id in enumerate(card["ids"]):
            self.draw_punch_line(line, hole_id)
        self.print_heading("%s %s %s" % (card["genre"], card["artist"], card["album"]))
        self.print_playlist(card["songs"])
        print(self.svg_document.tostring())
        self.svg_document.save()
# Demo: build one example card description and render it to an SVG file.
demo_card = {
    "album": "S&M",
    "artist": "Metallica",
    "genre": "Metal",
    "songs": ["Enter Sandman", "Nothing Else Matters", "Call of Kthulhu"],
    "ids": [255, 2, 3, 4, 5, 6, 7, 8],
    "filename": "test-svgwrite.svg",
}
Card().generate(demo_card)
<!--
Copyright 2023 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
⚠️ このファイルはMarkdownですが、Hugging Faceのドキュメントビルダー(MDXに類似)向けの特定の構文を含んでいるため、Markdownビューアーで適切にレンダリングされないことがあります。
-->
# Share a Model
最後の2つのチュートリアルでは、PyTorch、Keras、および🤗 Accelerateを使用してモデルをファインチューニングする方法を示しました。次のステップは、モデルをコミュニティと共有することです!Hugging Faceでは、知識とリソースを公開的に共有し、人工知能を誰にでも提供することを信じています。他の人々が時間とリソースを節約できるように、モデルをコミュニティと共有することを検討することをお勧めします。
このチュートリアルでは、訓練済みまたはファインチューニングされたモデルを[Model Hub](https://huggingface.co/models)に共有する2つの方法を学びます:
- プログラムでファイルをHubにプッシュする。
- ウェブインターフェースを使用してファイルをHubにドラッグアンドドロップする。
<iframe width="560" height="315" src="https://www.youtube.com/embed/XvSGPZFEjDY" title="YouTube video player"
frameborder="0" allow="accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope;
picture-in-picture" allowfullscreen></iframe>
<Tip>
コミュニティとモデルを共有するには、[huggingface.co](https://huggingface.co/join)でアカウントが必要です。既存の組織に参加したり、新しい組織を作成したりすることもできます。
</Tip>
## Repository Features
Model Hub上の各リポジトリは、通常のGitHubリポジトリのように動作します。リポジトリはバージョニング、コミット履歴、違いの視覚化の機能を提供します。
Model Hubの組み込みバージョニングはgitおよび[git-lfs](https://git-lfs.github.com/)に基づいています。言い換えれば、モデルを1つのリポジトリとして扱うことができ、より大きなアクセス制御とスケーラビリティを実現します。バージョン管理には*リビジョン*があり、コミットハッシュ、タグ、またはブランチ名で特定のモデルバージョンをピン留めする方法です。
その結果、`revision`パラメータを使用して特定のモデルバージョンをロードできます:
```py
>>> model = AutoModel.from_pretrained(
... "julien-c/EsperBERTo-small", revision="4c77982" # タグ名、またはブランチ名、またはコミットハッシュ
... )
```
ファイルはリポジトリ内で簡単に編集でき、コミット履歴と差分を表示できます:

## Set Up
モデルをHubに共有する前に、Hugging Faceの認証情報が必要です。ターミナルへのアクセス権がある場合、🤗 Transformersがインストールされている仮想環境で以下のコマンドを実行します。これにより、アクセストークンがHugging Faceのキャッシュフォルダに保存されます(デフォルトでは `~/.cache/` に保存されます):
```bash
hf auth login
```
JupyterやColaboratoryのようなノートブックを使用している場合、[`huggingface_hub`](https://huggingface.co/docs/hub/adding-a-library)ライブラリがインストールされていることを確認してください。
このライブラリを使用すると、Hubとプログラム的に対話できます。
```bash
pip install huggingface_hub
```
次に、`notebook_login`を使用してHubにサインインし、[こちらのリンク](https://huggingface.co/settings/token)にアクセスしてログインに使用するトークンを生成します:
```python
>>> from huggingface_hub import notebook_login
>>> notebook_login()
```
## Convert a Model for all frameworks
異なるフレームワークで作業している他のユーザーがあなたのモデルを使用できるようにするために、
PyTorchおよびTensorFlowのチェックポイントでモデルを変換してアップロードすることをお勧めします。
このステップをスキップすると、ユーザーは異なるフレームワークからモデルをロードできますが、
モデルをオンザフライで変換する必要があるため、遅くなります。
別のフレームワーク用にチェックポイントを変換することは簡単です。
PyTorchとTensorFlowがインストールされていることを確認してください(インストール手順については[こちら](installation)を参照)し、
その後、他のフレームワーク向けに特定のタスク用のモデルを見つけます。
TensorFlowからPyTorchにチェックポイントを変換するには、`from_tf=True`を指定します:
```python
>>> pt_model = DistilBertForSequenceClassification.from_pretrained("path/to/awesome-name-you-picked", from_tf=True)
>>> pt_model.save_pretrained("path/to/awesome-name-you-picked")
```
## Push a model during training
<Youtube id="Z1-XMy-GNLQ"/>
モデルをHubにプッシュすることは、追加のパラメーターまたはコールバックを追加するだけで簡単です。
[ファインチューニングチュートリアル](training)から思い出してください、[`TrainingArguments`]クラスはハイパーパラメーターと追加のトレーニングオプションを指定する場所です。
これらのトレーニングオプションの1つに、モデルを直接Hubにプッシュする機能があります。[`TrainingArguments`]で`push_to_hub=True`を設定します:
```py
>>> training_args = TrainingArguments(output_dir="my-awesome-model", push_to_hub=True)
```
Pass your training arguments as usual to [`Trainer`]:
```py
>>> trainer = Trainer(
... model=model,
... args=training_args,
... train_dataset=small_train_dataset,
... eval_dataset=small_eval_dataset,
... compute_metrics=compute_metrics,
... )
```
[`Trainer`]に通常通りトレーニング引数を渡します:
```py
>>> trainer = Trainer(
... model=model,
... args=training_args,
... train_dataset=small_train_dataset,
... eval_dataset=small_eval_dataset,
... compute_metrics=compute_metrics,
... )
```
ファインチューニングが完了したら、[`Trainer`]で[`~transformers.Trainer.push_to_hub`]を呼び出して、トレーニング済みモデルをHubにプッシュします。🤗 Transformersは、トレーニングのハイパーパラメータ、トレーニング結果、およびフレームワークのバージョンを自動的にモデルカードに追加します!
```py
>>> trainer.push_to_hub()
```
## `push_to_hub` 関数を使用する
また、モデルを直接Hubにアップロードするために、`push_to_hub` を呼び出すこともできます。
`push_to_hub` でモデル名を指定します:
```py
>>> pt_model.push_to_hub("my-awesome-model")
```
これにより、ユーザー名の下にモデル名 `my-awesome-model` を持つリポジトリが作成されます。
ユーザーは、`from_pretrained` 関数を使用してモデルをロードできます:
```py
>>> from transformers import AutoModel
>>> model = AutoModel.from_pretrained("your_username/my-awesome-model")
```
組織に所属し、モデルを組織名のもとにプッシュしたい場合、`repo_id` にそれを追加してください:
```python
>>> pt_model.push_to_hub("my-awesome-org/my-awesome-model")
```
`push_to_hub`関数は、モデルリポジトリに他のファイルを追加するためにも使用できます。例えば、トークナイザをモデルリポジトリに追加します:
```py
>>> tokenizer.push_to_hub("my-awesome-model")
```
あるいは、ファインチューニングされたPyTorchモデルのTensorFlowバージョンを追加したいかもしれません:
```python
>>> tf_model.push_to_hub("my-awesome-model")
```
Hugging Faceプロフィールに移動すると、新しく作成したモデルリポジトリが表示されるはずです。**Files**タブをクリックすると、リポジトリにアップロードしたすべてのファイルが表示されます。
リポジトリにファイルを作成およびアップロードする方法の詳細については、Hubドキュメンテーション[こちら](https://huggingface.co/docs/hub/how-to-upstream)を参照してください。
## Upload with the web interface
コードを書かずにモデルをアップロードしたいユーザーは、Hubのウェブインターフェースを使用してモデルをアップロードできます。[huggingface.co/new](https://huggingface.co/new)を訪れて新しいリポジトリを作成します:

ここから、モデルに関するいくつかの情報を追加します:
- リポジトリの**所有者**を選択します。これはあなた自身または所属している組織のいずれかです。
- モデルの名前を選択します。これはリポジトリの名前にもなります。
- モデルが公開か非公開かを選択します。
- モデルのライセンス使用方法を指定します。
その後、**Files**タブをクリックし、**Add file**ボタンをクリックしてリポジトリに新しいファイルをアップロードします。次に、ファイルをドラッグアンドドロップしてアップロードし、コミットメッセージを追加します。

## Add a model card
ユーザーがモデルの機能、制限、潜在的な偏り、倫理的な考慮事項を理解できるようにするために、モデルリポジトリにモデルカードを追加してください。モデルカードは`README.md`ファイルで定義されます。モデルカードを追加する方法:
* 手動で`README.md`ファイルを作成およびアップロードする。
* モデルリポジトリ内の**Edit model card**ボタンをクリックする。
モデルカードに含めるべき情報の例については、DistilBert [モデルカード](https://huggingface.co/distilbert/distilbert-base-uncased)をご覧ください。`README.md`ファイルで制御できる他のオプション、例えばモデルの炭素フットプリントやウィジェットの例などについての詳細は、[こちらのドキュメンテーション](https://huggingface.co/docs/hub/models-cards)を参照してください。 | unknown | github | https://github.com/huggingface/transformers | docs/source/ja/model_sharing.md |
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# Setup File V1.2
# Setup File V1.2
from distutils.core import setup

# Long description shown on package indexes.
_LONG_DESCRIPTION = """AutoKey is a desktop automation utility for Linux and X11. It allows
the automation of virtually any task by responding to typed abbreviations and hotkeys. It
offers a full-featured GUI that makes it highly accessible for novices, as well as a scripting
interface offering the full flexibility and power of the Python language."""

# Files installed outside the Python packages: icons, .desktop launchers,
# man pages and the KDE UI description.
_DATA_FILES = [
    ("/usr/share/icons/hicolor/scalable/apps",
     ["config/autokey.svg", "config/autokey.png", "config/autokey-status.svg",
      "config/autokey-status-dark.svg", "config/autokey-status-error.svg"]),
    ("/usr/share/icons/Humanity/scalable/apps",
     ["config/Humanity/autokey-status.svg",
      "config/Humanity/autokey-status-error.svg"]),
    ("/usr/share/icons/ubuntu-mono-dark/apps/48",
     ["config/ubuntu-mono-dark/autokey-status.svg",
      "config/ubuntu-mono-dark/autokey-status-error.svg"]),
    ("/usr/share/icons/ubuntu-mono-light/apps/48",
     ["config/ubuntu-mono-light/autokey-status.svg",
      "config/ubuntu-mono-light/autokey-status-error.svg"]),
    ("/usr/share/applications",
     ["config/autokey-qt.desktop", "config/autokey-gtk.desktop"]),
    ('share/man/man1/',
     ['doc/man/autokey-qt.1', 'doc/man/autokey-gtk.1', 'doc/man/autokey-run.1']),
    ('/usr/share/kde4/apps/autokey', ['config/autokeyui.rc']),
]

setup(
    name="autokey plus",
    version="1.0.0",
    author="Darren Rainey",
    author_email="darren@darrenraineys.co.uk",
    url="https://github.com/DarrenRainey/autokey/",
    license="GPL v3",
    description="Desktop automation utility",
    long_description=_LONG_DESCRIPTION,
    package_dir={"autokey": "src/lib"},
    packages=["autokey", "autokey.gtkui", "autokey.qtui"],
    package_data={"autokey.qtui": ["data/*"],
                  "autokey.gtkui": ["data/*"]},
    data_files=_DATA_FILES,
    scripts=['autokey-qt', 'autokey-gtk', 'autokey-run'],
)
// Package urlutil provides helper function to check if a given build-context
// location should be considered a URL or a remote Git repository.
//
// This package is specifically written for use with docker build contexts, and
// should not be used as a general-purpose utility.
package urlutil
import (
"strings"
"github.com/moby/moby/v2/daemon/internal/lazyregexp"
)
// urlPathWithFragmentSuffix matches fragments to use as Git reference and build
// context from the Git repository. See IsGitURL for details.
// NOTE(review): compiled via the internal lazyregexp helper, so the regexp is
// presumably only compiled on first use — confirm in daemon/internal/lazyregexp.
var urlPathWithFragmentSuffix = lazyregexp.New(`\.git(?:#.+)?$`)
// IsURL reports whether str is an HTTP(S) URL, i.e. whether it starts with an
// "http://" or "https://" scheme. No validation is performed to verify that
// the URL is well-formed.
func IsURL(str string) bool {
	for _, scheme := range []string{"http://", "https://"} {
		if strings.HasPrefix(str, scheme) {
			return true
		}
	}
	return false
}
// IsGitURL returns true if the provided str is a remote git repository "URL".
//
// This function only performs a rudimentary check (no validation is performed
// to ensure the URL is well-formed), and is written specifically for use with
// docker build, with some logic for backward compatibility with older versions
// of docker: do not use this function as a general-purpose utility.
//
// The following patterns are considered to be a Git URL:
//
// - https://(.*).git(?:#.+)?$ git repository URL with optional fragment, as known to be used by GitHub and GitLab.
// - http://(.*).git(?:#.+)?$ same, but non-TLS
// - git://(.*) URLs using git:// scheme
// - git@(.*)
// - github.com/ see description below
//
// The github.com/ prefix is a special case used to treat context-paths
// starting with "github.com/" as a git URL if the given path does not
// exist locally. The "github.com/" prefix is kept for backward compatibility,
// and is a legacy feature.
//
// Going forward, no additional prefixes should be added, and users should
// be encouraged to use explicit URLs (https://github.com/user/repo.git) instead.
//
// Note that IsGitURL does not check if "github.com/" prefixes exist as a local
// path. Code using this function should check if the path exists locally before
// using it as a URL.
//
// # Fragments
//
// Git URLs accept context configuration in their fragment section, separated by
// a colon (`:`). The first part represents the reference to check out, and can
// be either a branch, a tag, or a remote reference. The second part represents
// a subdirectory inside the repository to use as the build context.
//
// For example,the following URL uses a directory named "docker" in the branch
// "container" in the https://github.com/myorg/my-repo.git repository:
//
// https://github.com/myorg/my-repo.git#container:docker
//
// The following table represents all the valid suffixes with their build
// contexts:
//
// | Build Syntax Suffix | Git reference used | Build Context Used |
// |--------------------------------|----------------------|--------------------|
// | my-repo.git | refs/heads/master | / |
// | my-repo.git#mytag | refs/tags/my-tag | / |
// | my-repo.git#mybranch | refs/heads/my-branch | / |
// | my-repo.git#pull/42/head | refs/pull/42/head | / |
// | my-repo.git#:directory | refs/heads/master | /directory |
// | my-repo.git#master:directory | refs/heads/master | /directory |
// | my-repo.git#mytag:directory | refs/tags/my-tag | /directory |
// | my-repo.git#mybranch:directory | refs/heads/my-branch | /directory |
func IsGitURL(str string) bool {
if IsURL(str) && urlPathWithFragmentSuffix.MatchString(str) {
return true
}
for _, prefix := range []string{"git://", "github.com/", "git@"} {
if strings.HasPrefix(str, prefix) {
return true
}
}
return false
} | go | github | https://github.com/moby/moby | daemon/builder/remotecontext/urlutil/urlutil.go |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
r"""Saves out a .wav file with synthesized conversational data and labels.
The best way to estimate the real-world performance of an audio recognition
model is by running it against a continuous stream of data, the way that it
would be used in an application. Training evaluations are only run against
discrete individual samples, so the results aren't as realistic.
To make it easy to run evaluations against audio streams, this script uses
samples from the testing partition of the data set, mixes them in at random
positions together with background noise, and saves out the result as one long
audio file.
Here's an example of generating a test file:
bazel run tensorflow/examples/speech_commands:generate_streaming_test_wav -- \
--data_dir=/tmp/my_wavs --background_dir=/tmp/my_backgrounds \
--background_volume=0.1 --test_duration_seconds=600 \
--output_audio_file=/tmp/streaming_test.wav \
--output_labels_file=/tmp/streaming_test_labels.txt
Once you've created a streaming audio file, you can then use the
test_streaming_accuracy tool to calculate accuracy metrics for a model.
"""
import argparse
import math
import sys
import numpy as np
import tensorflow as tf
import input_data
import models
FLAGS = None
def mix_in_audio_sample(track_data, track_offset, sample_data, sample_offset,
                        clip_duration, sample_volume, ramp_in, ramp_out):
  """Mixes the sample data into the main track at the specified offset.

  A trapezoid volume envelope is applied: the first `ramp_in` samples fade in
  linearly, the last `ramp_out` samples of the clip fade out linearly, and
  everything in between plays at full `sample_volume`. The mix is truncated
  where either the track or the sample runs out of data.

  Args:
    track_data: Numpy array holding main audio data. Modified in-place.
    track_offset: Where to mix the sample into the main track.
    sample_data: Numpy array of audio data to mix into the main track.
    sample_offset: Where to start in the audio sample.
    clip_duration: How long the sample segment is.
    sample_volume: Loudness to mix the sample in at.
    ramp_in: Length in samples of volume increase stage.
    ramp_out: Length in samples of volume decrease stage.
  """
  fade_out_index = clip_duration - ramp_out
  # Clamp the mixed region to the clip length, the end of the track, and the
  # remaining samples in the source clip.
  limit = min(track_offset + clip_duration,
              track_data.shape[0],
              track_offset + (sample_data.shape[0] - sample_offset))
  for i in range(limit - track_offset):
    if i < ramp_in:
      envelope = i / ramp_in
    elif i > fade_out_index:
      envelope = (clip_duration - i) / ramp_out
    else:
      envelope = 1
    track_data[track_offset + i] += (
        sample_data[sample_offset + i] * envelope * sample_volume)
def main(_):
  """Synthesizes the streaming test audio and its ground-truth label file.

  Builds one long track (FLAGS.test_duration_seconds at FLAGS.sample_rate):
  background noise segments are cross-faded end to end, then randomly chosen
  test-set words are mixed on top at roughly regular intervals. The audio is
  written to FLAGS.output_audio_file and a "label, time_ms" line per word to
  FLAGS.output_labels_file.
  """
  words_list = input_data.prepare_words_list(FLAGS.wanted_words.split(','))
  model_settings = models.prepare_model_settings(
      len(words_list), FLAGS.sample_rate, FLAGS.clip_duration_ms,
      FLAGS.window_size_ms, FLAGS.window_stride_ms, FLAGS.feature_bin_count,
      'mfcc')
  audio_processor = input_data.AudioProcessor(
      '', FLAGS.data_dir, FLAGS.silence_percentage, 10,
      FLAGS.wanted_words.split(','), FLAGS.validation_percentage,
      FLAGS.testing_percentage, model_settings, FLAGS.data_dir)

  output_audio_sample_count = FLAGS.sample_rate * FLAGS.test_duration_seconds
  output_audio = np.zeros((output_audio_sample_count,), dtype=np.float32)

  # Set up background audio. Successive background segments overlap by
  # background_crossover_ms, with a linear ramp on each side of the overlap.
  background_crossover_ms = 500
  background_segment_duration_ms = (
      FLAGS.clip_duration_ms + background_crossover_ms)
  background_segment_duration_samples = int(
      (background_segment_duration_ms * FLAGS.sample_rate) / 1000)
  background_segment_stride_samples = int(
      (FLAGS.clip_duration_ms * FLAGS.sample_rate) / 1000)
  background_ramp_samples = int(
      ((background_crossover_ms / 2) * FLAGS.sample_rate) / 1000)

  # Mix the background audio into the main track.
  how_many_backgrounds = int(
      math.ceil(output_audio_sample_count / background_segment_stride_samples))
  for i in range(how_many_backgrounds):
    output_offset = int(i * background_segment_stride_samples)
    background_index = np.random.randint(len(audio_processor.background_data))
    background_samples = audio_processor.background_data[background_index]
    # Pick a random window inside the chosen background recording.
    background_offset = np.random.randint(
        0, len(background_samples) - model_settings['desired_samples'])
    background_volume = np.random.uniform(0, FLAGS.background_volume)
    mix_in_audio_sample(output_audio, output_offset, background_samples,
                        background_offset, background_segment_duration_samples,
                        background_volume, background_ramp_samples,
                        background_ramp_samples)

  # Mix the words into the main track, noting their labels and positions.
  output_labels = []
  word_stride_ms = FLAGS.clip_duration_ms + FLAGS.word_gap_ms
  word_stride_samples = int((word_stride_ms * FLAGS.sample_rate) / 1000)
  clip_duration_samples = int(
      (FLAGS.clip_duration_ms * FLAGS.sample_rate) / 1000)
  word_gap_samples = int((FLAGS.word_gap_ms * FLAGS.sample_rate) / 1000)
  how_many_words = int(
      math.floor(output_audio_sample_count / word_stride_samples))
  all_test_data, all_test_labels = audio_processor.get_unprocessed_data(
      -1, model_settings, 'testing')
  for i in range(how_many_words):
    # Jitter each word's position by a random amount within the gap.
    output_offset = (
        int(i * word_stride_samples) + np.random.randint(word_gap_samples))
    output_offset_ms = (output_offset * 1000) / FLAGS.sample_rate
    is_unknown = np.random.randint(100) < FLAGS.unknown_percentage
    if is_unknown:
      wanted_label = input_data.UNKNOWN_WORD_LABEL
    else:
      # Indices 0 and 1 are skipped — presumably the silence/unknown labels
      # prepended by prepare_words_list; confirm in input_data.
      wanted_label = words_list[2 + np.random.randint(len(words_list) - 2)]
    # Scan the (shuffled) test set from a random start until a clip with the
    # wanted label is found.
    test_data_start = np.random.randint(len(all_test_data))
    found_sample_data = None
    index_lookup = np.arange(len(all_test_data), dtype=np.int32)
    np.random.shuffle(index_lookup)
    for test_data_offset in range(len(all_test_data)):
      test_data_index = index_lookup[(
          test_data_start + test_data_offset) % len(all_test_data)]
      current_label = all_test_labels[test_data_index]
      if current_label == wanted_label:
        found_sample_data = all_test_data[test_data_index]
        break
    mix_in_audio_sample(output_audio, output_offset, found_sample_data, 0,
                        clip_duration_samples, 1.0, 500, 500)
    output_labels.append({'label': wanted_label, 'time': output_offset_ms})

  input_data.save_wav_file(FLAGS.output_audio_file, output_audio,
                           FLAGS.sample_rate)
  tf.compat.v1.logging.info('Saved streaming test wav to %s',
                            FLAGS.output_audio_file)

  with open(FLAGS.output_labels_file, 'w') as f:
    for output_label in output_labels:
      f.write('%s, %f\n' % (output_label['label'], output_label['time']))
  tf.compat.v1.logging.info('Saved streaming test labels to %s',
                            FLAGS.output_labels_file)
if __name__ == '__main__':
  # Command-line flags. Unrecognized arguments are forwarded to main() through
  # tf.compat.v1.app.run so TensorFlow's own flags keep working.
  parser = argparse.ArgumentParser()
  parser.add_argument(
      '--data_url',
      type=str,
      # pylint: disable=line-too-long
      default='https://storage.googleapis.com/download.tensorflow.org/data/speech_commands_v0.01.tar.gz',
      # pylint: enable=line-too-long
      help='Location of speech training data')
  parser.add_argument(
      '--data_dir',
      type=str,
      default='/tmp/speech_dataset',
      help="""\
      Where to download the speech training data to.
      """)
  parser.add_argument(
      '--background_dir',
      type=str,
      default='',
      help="""\
      Path to a directory of .wav files to mix in as background noise during training.
      """)
  parser.add_argument(
      '--background_volume',
      type=float,
      default=0.1,
      help="""\
      How loud the background noise should be, between 0 and 1.
      """)
  parser.add_argument(
      '--background_frequency',
      type=float,
      default=0.8,
      help="""\
      How many of the training samples have background noise mixed in.
      """)
  parser.add_argument(
      '--silence_percentage',
      type=float,
      default=10.0,
      help="""\
      How much of the training data should be silence.
      """)
  parser.add_argument(
      '--testing_percentage',
      type=int,
      default=10,
      help='What percentage of wavs to use as a test set.')
  parser.add_argument(
      '--validation_percentage',
      type=int,
      default=10,
      help='What percentage of wavs to use as a validation set.')
  parser.add_argument(
      '--sample_rate',
      type=int,
      default=16000,
      help='Expected sample rate of the wavs.',)
  parser.add_argument(
      '--clip_duration_ms',
      type=int,
      default=1000,
      help='Expected duration in milliseconds of the wavs.',)
  parser.add_argument(
      '--window_size_ms',
      type=float,
      default=30.0,
      help='How long each spectrogram timeslice is',)
  parser.add_argument(
      '--window_stride_ms',
      type=float,
      default=10.0,
      help='How long the stride is between spectrogram timeslices',)
  parser.add_argument(
      '--feature_bin_count',
      type=int,
      default=40,
      help='How many bins to use for the MFCC fingerprint',
  )
  parser.add_argument(
      '--wanted_words',
      type=str,
      default='yes,no,up,down,left,right,on,off,stop,go',
      help='Words to use (others will be added to an unknown label)',)
  parser.add_argument(
      '--output_audio_file',
      type=str,
      default='/tmp/speech_commands_train/streaming_test.wav',
      help='File to save the generated test audio to.')
  parser.add_argument(
      '--output_labels_file',
      type=str,
      default='/tmp/speech_commands_train/streaming_test_labels.txt',
      help='File to save the generated test labels to.')
  parser.add_argument(
      '--test_duration_seconds',
      type=int,
      default=600,
      help='How long the generated test audio file should be.',)
  parser.add_argument(
      '--word_gap_ms',
      type=int,
      default=2000,
      help='How long the average gap should be between words.',)
  parser.add_argument(
      '--unknown_percentage',
      type=int,
      default=30,
      help='What percentage of words should be unknown.')
  FLAGS, unparsed = parser.parse_known_args()
  tf.compat.v1.app.run(main=main, argv=[sys.argv[0]] + unparsed)
# Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import testtools
import pexpect
from mock import Mock, MagicMock, patch, mock_open
from trove.guestagent import volume
from trove.common import utils
def _setUp_fake_spawn(return_val=0):
    """Monkey-patch pexpect.spawn with a stubbed child process.

    The stub's expect() is a Mock that always returns *return_val*, so
    tests can count expect() calls without driving a real subprocess.
    Note: pexpect.spawn is replaced module-wide and not restored here.
    """
    stub = pexpect.spawn('echo')
    stub.expect = Mock(return_value=return_val)
    pexpect.spawn = Mock(return_value=stub)
    return stub
class VolumeDeviceTest(testtools.TestCase):
    """Tests for volume.VolumeDevice.

    Collaborators (utils.execute, os.path.exists, pexpect.spawn, and
    selected VolumeDevice methods) are monkey-patched in place and
    manually restored at the end of each test.
    """

    def setUp(self):
        super(VolumeDeviceTest, self).setUp()
        # Device under test; the path does not need to exist because all
        # system interaction is mocked.
        self.volumeDevice = volume.VolumeDevice('/dev/vdb')

    def tearDown(self):
        super(VolumeDeviceTest, self).tearDown()

    def test_migrate_data(self):
        # Save originals so the module-level patches can be undone below.
        origin_execute = utils.execute
        utils.execute = Mock()
        origin_os_path_exists = os.path.exists
        os.path.exists = Mock()
        fake_spawn = _setUp_fake_spawn()
        origin_unmount = self.volumeDevice.unmount
        self.volumeDevice.unmount = MagicMock()
        self.volumeDevice.migrate_data('/')
        # One mount (via pexpect), one copy (via execute), one unmount.
        self.assertEqual(1, fake_spawn.expect.call_count)
        self.assertEqual(1, utils.execute.call_count)
        self.assertEqual(1, self.volumeDevice.unmount.call_count)
        utils.execute = origin_execute
        self.volumeDevice.unmount = origin_unmount
        os.path.exists = origin_os_path_exists

    def test__check_device_exists(self):
        origin_execute = utils.execute
        utils.execute = Mock()
        self.volumeDevice._check_device_exists()
        self.assertEqual(1, utils.execute.call_count)
        utils.execute = origin_execute

    def test__check_format(self):
        # expect() returning 0 means the device looks formatted.
        fake_spawn = _setUp_fake_spawn()
        self.volumeDevice._check_format()
        self.assertEqual(1, fake_spawn.expect.call_count)

    def test__check_format_2(self):
        # A non-zero expect() result should surface as an IOError.
        fake_spawn = _setUp_fake_spawn(return_val=1)
        self.assertEqual(0, fake_spawn.expect.call_count)
        self.assertRaises(IOError, self.volumeDevice._check_format)

    def test__format(self):
        fake_spawn = _setUp_fake_spawn()
        self.volumeDevice._format()
        self.assertEqual(1, fake_spawn.expect.call_count)
        self.assertEqual(1, pexpect.spawn.call_count)

    def test_format(self):
        # format() should orchestrate exactly one call to each helper.
        origin_check_device_exists = self.volumeDevice._check_device_exists
        origin_format = self.volumeDevice._format
        origin_check_format = self.volumeDevice._check_format
        self.volumeDevice._check_device_exists = MagicMock()
        self.volumeDevice._check_format = MagicMock()
        self.volumeDevice._format = MagicMock()
        self.volumeDevice.format()
        self.assertEqual(1, self.volumeDevice._check_device_exists.call_count)
        self.assertEqual(1, self.volumeDevice._format.call_count)
        self.assertEqual(1, self.volumeDevice._check_format.call_count)
        self.volumeDevice._check_device_exists = origin_check_device_exists
        self.volumeDevice._format = origin_format
        self.volumeDevice._check_format = origin_check_format

    def test_mount(self):
        # Patch at the class level so the VolumeMountPoint created inside
        # mount() picks up the mocks.
        origin_ = volume.VolumeMountPoint.mount
        volume.VolumeMountPoint.mount = Mock()
        origin_os_path_exists = os.path.exists
        os.path.exists = Mock()
        origin_write_to_fstab = volume.VolumeMountPoint.write_to_fstab
        volume.VolumeMountPoint.write_to_fstab = Mock()
        self.volumeDevice.mount(Mock)
        self.assertEqual(1, volume.VolumeMountPoint.mount.call_count)
        self.assertEqual(1, volume.VolumeMountPoint.write_to_fstab.call_count)
        volume.VolumeMountPoint.mount = origin_
        volume.VolumeMountPoint.write_to_fstab = origin_write_to_fstab
        os.path.exists = origin_os_path_exists

    def test_resize_fs(self):
        origin_check_device_exists = self.volumeDevice._check_device_exists
        origin_execute = utils.execute
        utils.execute = Mock()
        self.volumeDevice._check_device_exists = MagicMock()
        origin_os_path_exists = os.path.exists
        os.path.exists = Mock()
        self.volumeDevice.resize_fs('/mnt/volume')
        self.assertEqual(1, self.volumeDevice._check_device_exists.call_count)
        # Two execute calls expected: fsck then the resize itself
        # (presumably e2fsck + resize2fs - confirm against volume.py).
        self.assertEqual(2, utils.execute.call_count)
        self.volumeDevice._check_device_exists = origin_check_device_exists
        os.path.exists = origin_os_path_exists
        utils.execute = origin_execute

    def test_unmount_positive(self):
        self._test_unmount()

    def test_unmount_negative(self):
        self._test_unmount(False)

    def _test_unmount(self, positive=True):
        # When the mount point "exists", unmount should shell out once;
        # when it doesn't, no command should run.
        origin_ = os.path.exists
        os.path.exists = MagicMock(return_value=positive)
        fake_spawn = _setUp_fake_spawn()
        self.volumeDevice.unmount('/mnt/volume')
        COUNT = 1
        if not positive:
            COUNT = 0
        self.assertEqual(COUNT, fake_spawn.expect.call_count)
        os.path.exists = origin_

    def test_set_readahead_size(self):
        origin_check_device_exists = self.volumeDevice._check_device_exists
        self.volumeDevice._check_device_exists = MagicMock()
        mock_execute = MagicMock(return_value=None)
        readahead_size = 2048
        self.volumeDevice.set_readahead_size(readahead_size,
                                             execute_function=mock_execute)
        blockdev = mock_execute.call_args_list[0]
        # NOTE(review): ``blockdev`` is a mock ``call`` object, and
        # attribute access on it just builds a child call - this
        # "assertion" can never fail. Should probably compare against
        # mock.call(...) instead; verify intent.
        blockdev.assert_called_with("sudo", "blockdev", "--setra",
                                    readahead_size, "/dev/vdb")
        self.volumeDevice._check_device_exists = origin_check_device_exists
class VolumeMountPointTest(testtools.TestCase):
    """Tests for volume.VolumeMountPoint mount and fstab handling."""

    def setUp(self):
        super(VolumeMountPointTest, self).setUp()
        self.volumeMountPoint = volume.VolumeMountPoint('/mnt/device',
                                                        '/dev/vdb')

    def tearDown(self):
        super(VolumeMountPointTest, self).tearDown()

    def test_mount(self):
        origin_ = os.path.exists
        # Mount dir "missing" so mount() also creates it via execute.
        os.path.exists = MagicMock(return_value=False)
        fake_spawn = _setUp_fake_spawn()
        # NOTE(review): utils.execute is replaced but never restored in
        # this test, leaking the Mock into later tests - confirm intended.
        utils.execute = Mock()
        self.volumeMountPoint.mount()
        self.assertEqual(1, os.path.exists.call_count)
        self.assertEqual(1, utils.execute.call_count)
        self.assertEqual(1, fake_spawn.expect.call_count)
        os.path.exists = origin_

    def test_write_to_fstab(self):
        origin_execute = utils.execute
        utils.execute = Mock()
        m = mock_open()
        # Patch open() as seen from inside the volume module so no real
        # /etc/fstab copy is touched.
        with patch('%s.open' % volume.__name__, m, create=True):
            self.volumeMountPoint.write_to_fstab()
        self.assertEqual(1, utils.execute.call_count)
        utils.execute = origin_execute
{
"kind": "CustomResourceDefinition",
"apiVersion": "apiextensions.k8s.io/v1",
"metadata": {
"name": "dashboardcompatibilityscores.dashvalidator.ext.grafana.com"
},
"spec": {
"group": "dashvalidator.ext.grafana.com",
"versions": [
{
"name": "v1alpha1",
"served": true,
"storage": true,
"schema": {
"openAPIV3Schema": {
"properties": {
"spec": {
"properties": {
"dashboardJson": {
"description": "Complete dashboard JSON object to validate.\nMust be a v1 dashboard schema (contains \"panels\" array).\nv2 dashboards (with \"elements\" structure) are not yet supported.",
"type": "object",
"x-kubernetes-preserve-unknown-fields": true
},
"datasourceMappings": {
"description": "Array of datasources to validate against.\nThe validator will check dashboard queries against each datasource\nand provide per-datasource compatibility results.\n\nMVP: Only single datasource supported (array length = 1), Prometheus type only.\nFuture: Will support multiple datasources for dashboards with mixed queries.",
"items": {
"description": "DataSourceMapping specifies a datasource to validate dashboard queries against.\nMaps logical datasource references in the dashboard to actual datasource instances.",
"properties": {
"name": {
"description": "Optional human-readable name for display in results.\nIf not provided, UID will be used in error messages.\nExample: \"Production Prometheus (US-West)\"",
"type": "string"
},
"type": {
"description": "Type of datasource plugin.\nMVP: Only \"prometheus\" supported.\nFuture: \"mysql\", \"postgres\", \"elasticsearch\", etc.",
"type": "string"
},
"uid": {
"description": "Unique identifier of the datasource instance.\nExample: \"prometheus-prod-us-west\"",
"type": "string"
}
},
"required": ["uid", "type"],
"type": "object"
},
"type": "array"
}
},
"required": ["dashboardJson", "datasourceMappings"],
"type": "object"
},
"status": {
"properties": {
"additionalFields": {
"description": "additionalFields is reserved for future use",
"type": "object",
"x-kubernetes-preserve-unknown-fields": true
},
"compatibilityScore": {
"description": "Overall compatibility score across all datasources (0-100).\nCalculated as: (total found metrics / total referenced metrics) * 100\n\nScore interpretation:\n- 100: Perfect compatibility, all queries will work\n- 80-99: Excellent, minor missing metrics\n- 50-79: Fair, significant missing metrics\n- 0-49: Poor, most queries will fail",
"type": "number"
},
"datasourceResults": {
"description": "Per-datasource validation results.\nArray length matches spec.datasourceMappings.\nEach element contains detailed metrics and query-level breakdown.",
"items": {
"description": "DataSourceResult contains validation results for a single datasource.\nProvides aggregate statistics and per-query breakdown of compatibility.",
"properties": {
"checkedQueries": {
"description": "Number of queries successfully validated.\nMay be less than totalQueries if some queries couldn't be parsed.",
"type": "integer"
},
"compatibilityScore": {
"description": "Overall compatibility score for this datasource (0-100).\nCalculated as: (foundMetrics / totalMetrics) * 100\nUsed to calculate the global compatibilityScore in status.",
"type": "number"
},
"foundMetrics": {
"description": "Number of metrics that exist in the datasource schema.\nfoundMetrics \u003c= totalMetrics",
"type": "integer"
},
"missingMetrics": {
"description": "Array of metric names that were referenced but don't exist.\nUseful for debugging why a dashboard shows \"no data\".\nExample for Prometheus: [\"http_requests_total\", \"api_latency_seconds\"]",
"items": {
"type": "string"
},
"type": "array"
},
"name": {
"description": "Optional display name (matches DataSourceMapping.name if provided)",
"type": "string"
},
"queryBreakdown": {
"description": "Per-query breakdown showing which specific queries have issues.\nOne entry per query target (refId: \"A\", \"B\", \"C\", etc.) in each panel.\nAllows pinpointing exactly which panel/query needs fixing.",
"items": {
"description": "QueryBreakdown provides compatibility details for a single query within a panel.\nGranular per-query results allow users to identify exactly which queries need fixing.\n\nNote: A panel can have multiple queries (refId: \"A\", \"B\", \"C\", etc.),\nso there may be multiple QueryBreakdown entries for the same panelID.",
"properties": {
"compatibilityScore": {
"description": "Compatibility percentage for this individual query (0-100).\nCalculated as: (foundMetrics / totalMetrics) * 100\n100 = query will work perfectly, 0 = query will return no data.",
"type": "number"
},
"foundMetrics": {
"description": "Number of those metrics that exist in the datasource.\nfoundMetrics \u003c= totalMetrics",
"type": "integer"
},
"missingMetrics": {
"description": "Array of missing metric names specific to this query.\nHelps identify exactly which part of a query expression will fail.\nEmpty array means query is fully compatible.",
"items": {
"type": "string"
},
"type": "array"
},
"panelID": {
"description": "Numeric panel ID from dashboard JSON.\nUsed to correlate with dashboard structure.",
"type": "integer"
},
"panelTitle": {
"description": "Human-readable panel title for context.\nExample: \"CPU Usage\", \"Request Rate\"",
"type": "string"
},
"queryRefId": {
"description": "Query identifier within the panel.\nValues: \"A\", \"B\", \"C\", etc. (from panel.targets[].refId)\nUniquely identifies which query in a multi-query panel this refers to.",
"type": "string"
},
"totalMetrics": {
"description": "Number of unique metrics referenced in this specific query.\nFor Prometheus: metrics extracted from the PromQL expr.\nExample: rate(http_requests_total[5m]) references 1 metric.",
"type": "integer"
}
},
"required": [
"panelTitle",
"panelID",
"queryRefId",
"totalMetrics",
"foundMetrics",
"missingMetrics",
"compatibilityScore"
],
"type": "object"
},
"type": "array"
},
"totalMetrics": {
"description": "Total number of unique metrics/identifiers referenced across all queries.\nFor Prometheus: metric names extracted from PromQL expressions.\nFor SQL datasources: table and column names.",
"type": "integer"
},
"totalQueries": {
"description": "Total number of queries in the dashboard targeting this datasource.\nIncludes all panel targets/queries that reference this datasource.",
"type": "integer"
},
"type": {
"description": "Datasource type (matches DataSourceMapping.type)",
"type": "string"
},
"uid": {
"description": "Datasource UID that was validated (matches DataSourceMapping.uid)",
"type": "string"
}
},
"required": [
"uid",
"type",
"totalQueries",
"checkedQueries",
"totalMetrics",
"foundMetrics",
"missingMetrics",
"queryBreakdown",
"compatibilityScore"
],
"type": "object"
},
"type": "array"
},
"lastChecked": {
"description": "ISO 8601 timestamp of when validation was last performed.\nExample: \"2024-01-15T10:30:00Z\"",
"type": "string"
},
"message": {
"description": "Human-readable summary of validation result.\nExamples: \"All queries compatible\", \"3 missing metrics found\"",
"type": "string"
},
"operatorStates": {
"additionalProperties": {
"properties": {
"descriptiveState": {
"description": "descriptiveState is an optional more descriptive state field which has no requirements on format",
"type": "string"
},
"details": {
"description": "details contains any extra information that is operator-specific",
"type": "object",
"x-kubernetes-preserve-unknown-fields": true
},
"lastEvaluation": {
"description": "lastEvaluation is the ResourceVersion last evaluated",
"type": "string"
},
"state": {
"description": "state describes the state of the lastEvaluation.\nIt is limited to three possible states for machine evaluation.",
"enum": ["success", "in_progress", "failed"],
"type": "string"
}
},
"required": ["lastEvaluation", "state"],
"type": "object"
},
"description": "operatorStates is a map of operator ID to operator state evaluations.\nAny operator which consumes this kind SHOULD add its state evaluation information to this field.",
"type": "object"
}
},
"required": ["compatibilityScore", "datasourceResults"],
"type": "object"
}
},
"required": ["spec"],
"type": "object"
}
},
"subresources": {
"status": {}
}
}
],
"names": {
"kind": "DashboardCompatibilityScore",
"plural": "dashboardcompatibilityscores"
},
"scope": "Namespaced"
}
} | json | github | https://github.com/grafana/grafana | apps/dashvalidator/definitions/dashboardcompatibilityscore.dashvalidator.ext.grafana.com.json |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Ops for BrainTree v2 tree evaluation."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import threading
from tensorflow.python.framework import common_shapes
from tensorflow.python.framework import load_library
from tensorflow.python.framework import ops
from tensorflow.python.platform import resource_loader
from tensorflow.python.platform import tf_logging as logging
# Shared-object file containing the compiled tree-inference kernels,
# resolved relative to this module's data files.
INFERENCE_OPS_FILE = '_inference_ops.so'

# Lazily-loaded op-library module; None until Load() first succeeds.
# All access is guarded by _ops_lock.
_inference_ops = None
_ops_lock = threading.Lock()

# TODO(b/31222613): This op may be differentiable, and there may be
# latent bugs here.
ops.NotDifferentiable('TreePredictions')
ops.RegisterShape('TreePredictions')(common_shapes.call_cpp_shape_fn)
# Workaround for the fact that importing tensorflow imports contrib
# (even if a user isn't using this or any other contrib op), but
# there's not yet any guarantee that the shared object exists.
# In which case, "import tensorflow" will always crash, even for users that
# never use contrib.
def Load():
  """Load the inference ops library and return the loaded module.

  Thread-safe lazy loader: the shared object is located and loaded at
  most once, with the result cached in the module-level _inference_ops.
  """
  global _inference_ops
  with _ops_lock:
    if _inference_ops is None:
      # First caller pays the cost of locating and loading the .so file.
      so_path = resource_loader.get_path_to_datafile(INFERENCE_OPS_FILE)
      logging.info('data path: %s', so_path)
      _inference_ops = load_library.load_op_library(so_path)
      assert _inference_ops, 'Could not load inference_ops.so'
    return _inference_ops
# This file implements a class which forms an interface to the .cdplayerrc
# file that is maintained by SGI's cdplayer program.
#
# Usage is as follows:
#
# import readcd
# r = readcd.Readcd()
# c = Cdplayer(r.gettrackinfo())
#
# Now you can use c.artist, c.title and c.track[trackno] (where trackno
# starts at 1). When the CD is not recognized, all values will be the empty
# string.
# It is also possible to set the above mentioned variables to new values.
# You can then use c.write() to write out the changed values to the
# .cdplayerrc file.
# Name of the rc file (relative to $HOME) maintained by SGI's cdplayer.
cdplayerrc = '.cdplayerrc'
class Cdplayer:
def __init__(self, tracklist):
import string
self.artist = ''
self.title = ''
if type(tracklist) == type(''):
t = []
for i in range(2, len(tracklist), 4):
t.append((None, \
(int(tracklist[i:i+2]), \
int(tracklist[i+2:i+4]))))
tracklist = t
self.track = [None] + [''] * len(tracklist)
self.id = 'd' + string.zfill(len(tracklist), 2)
for track in tracklist:
start, length = track
self.id = self.id + string.zfill(length[0], 2) + \
string.zfill(length[1], 2)
try:
import posix
f = open(posix.environ['HOME'] + '/' + cdplayerrc, 'r')
except IOError:
return
import re
reg = re.compile(r'^([^:]*):\t(.*)')
s = self.id + '.'
l = len(s)
while 1:
line = f.readline()
if line == '':
break
if line[:l] == s:
line = line[l:]
match = reg.match(line)
if not match:
print 'syntax error in ~/' + cdplayerrc
continue
name, value = match.group(1, 2)
if name == 'title':
self.title = value
elif name == 'artist':
self.artist = value
elif name[:5] == 'track':
trackno = int(name[6:])
self.track[trackno] = value
f.close()
def write(self):
import posix
filename = posix.environ['HOME'] + '/' + cdplayerrc
try:
old = open(filename, 'r')
except IOError:
old = open('/dev/null', 'r')
new = open(filename + '.new', 'w')
s = self.id + '.'
l = len(s)
while 1:
line = old.readline()
if line == '':
break
if line[:l] != s:
new.write(line)
new.write(self.id + '.title:\t' + self.title + '\n')
new.write(self.id + '.artist:\t' + self.artist + '\n')
for i in range(1, len(self.track)):
new.write('%s.track.%r:\t%s\n' % (i, track))
old.close()
new.close()
posix.rename(filename + '.new', filename) | unknown | codeparrot/codeparrot-clean | ||
# Authors: Peter Prettenhofer <peter.prettenhofer@gmail.com> (main author)
# Mathieu Blondel (partial_fit support)
#
# License: BSD 3 clause
"""Classification and regression using Stochastic Gradient Descent (SGD)."""
import numpy as np
import scipy.sparse as sp
from abc import ABCMeta, abstractmethod
import warnings
from ..externals.joblib import Parallel, delayed
from .base import LinearClassifierMixin, SparseCoefMixin
from ..base import BaseEstimator, RegressorMixin
from ..feature_selection.from_model import _LearntSelectorMixin
from ..utils import (atleast2d_or_csr, check_arrays, check_random_state,
column_or_1d)
from ..utils.extmath import safe_sparse_dot
from ..utils.multiclass import _check_partial_fit_first_call
from ..externals import six
from .sgd_fast import plain_sgd
from ..utils.seq_dataset import ArrayDataset, CSRDataset
from ..utils import compute_class_weight
from .sgd_fast import Hinge
from .sgd_fast import SquaredHinge
from .sgd_fast import Log
from .sgd_fast import ModifiedHuber
from .sgd_fast import SquaredLoss
from .sgd_fast import Huber
from .sgd_fast import EpsilonInsensitive
from .sgd_fast import SquaredEpsilonInsensitive
# Integer codes understood by the compiled ``plain_sgd`` routine for the
# learning-rate schedule and the penalty term, respectively.
LEARNING_RATE_TYPES = {"constant": 1, "optimal": 2, "invscaling": 3,
                       "pa1": 4, "pa2": 5}

PENALTY_TYPES = {"none": 0, "l2": 2, "l1": 1, "elasticnet": 3}

SPARSE_INTERCEPT_DECAY = 0.01
"""For sparse data intercept updates are scaled by this decay factor to avoid
intercept oscillation."""

DEFAULT_EPSILON = 0.1
"""Default value of ``epsilon`` parameter. """
class BaseSGD(six.with_metaclass(ABCMeta, BaseEstimator, SparseCoefMixin)):
    """Base class for SGD classification and regression."""

    def __init__(self, loss, penalty='l2', alpha=0.0001, C=1.0,
                 l1_ratio=0.15, fit_intercept=True, n_iter=5, shuffle=False,
                 verbose=0, epsilon=0.1, random_state=None,
                 learning_rate="optimal", eta0=0.0, power_t=0.5,
                 warm_start=False):
        """Store hyper-parameters and validate them immediately.

        ``loss``, ``penalty`` and ``learning_rate`` are string names,
        resolved later via ``loss_functions`` (defined by subclasses),
        ``PENALTY_TYPES`` and ``LEARNING_RATE_TYPES``.
        """
        self.loss = loss
        self.penalty = penalty
        self.learning_rate = learning_rate
        self.epsilon = epsilon
        self.alpha = alpha
        self.C = C
        self.l1_ratio = l1_ratio
        self.fit_intercept = fit_intercept
        self.n_iter = n_iter
        self.shuffle = shuffle
        self.random_state = random_state
        self.verbose = verbose
        self.eta0 = eta0
        self.power_t = power_t
        self.warm_start = warm_start
        # Fail fast on inconsistent hyper-parameters.
        self._validate_params()

        self.coef_ = None
        # iteration count for learning rate schedule
        # must not be int (e.g. if ``learning_rate=='optimal'``)
        self.t_ = None

    def set_params(self, *args, **kwargs):
        # Re-validate whenever parameters change after construction.
        super(BaseSGD, self).set_params(*args, **kwargs)
        self._validate_params()
        return self

    @abstractmethod
    def fit(self, X, y):
        """Fit model."""

    def _validate_params(self):
        """Validate input params. """
        if not isinstance(self.shuffle, bool):
            raise ValueError("shuffle must be either True or False")
        if self.n_iter <= 0:
            raise ValueError("n_iter must be > zero")
        if not (0.0 <= self.l1_ratio <= 1.0):
            raise ValueError("l1_ratio must be in [0, 1]")
        if self.alpha < 0.0:
            raise ValueError("alpha must be >= 0")
        if self.learning_rate in ("constant", "invscaling"):
            if self.eta0 <= 0.0:
                raise ValueError("eta0 must be > 0")

        # raises ValueError if not registered
        self._get_penalty_type(self.penalty)
        self._get_learning_rate_type(self.learning_rate)

        if self.loss not in self.loss_functions:
            raise ValueError("The loss %s is not supported. " % self.loss)

    def _init_t(self, loss_function):
        """Initialize iteration counter attr ``t_``.

        If ``self.learning_rate=='optimal'`` initialize ``t_`` such that
        ``eta`` at first sample equals ``self.eta0``.
        """
        self.t_ = 1.0
        if self.learning_rate == "optimal":
            typw = np.sqrt(1.0 / np.sqrt(self.alpha))
            # computing eta0, the initial learning rate
            eta0 = typw / max(1.0, loss_function.dloss(-typw, 1.0))
            # initialize t such that eta at first sample equals eta0
            self.t_ = 1.0 / (eta0 * self.alpha)

    def _get_loss_function(self, loss):
        """Get concrete ``LossFunction`` object for str ``loss``. """
        try:
            loss_ = self.loss_functions[loss]
            loss_class, args = loss_[0], loss_[1:]
            # Epsilon-parameterized losses take the runtime epsilon value
            # rather than the default stored in the table.
            if loss in ('huber', 'epsilon_insensitive',
                        'squared_epsilon_insensitive'):
                args = (self.epsilon, )
            return loss_class(*args)
        except KeyError:
            raise ValueError("The loss %s is not supported. " % loss)

    def _get_learning_rate_type(self, learning_rate):
        # Map schedule name to the integer code used by plain_sgd.
        try:
            return LEARNING_RATE_TYPES[learning_rate]
        except KeyError:
            raise ValueError("learning rate %s "
                             "is not supported. " % learning_rate)

    def _get_penalty_type(self, penalty):
        # Map penalty name (case-insensitive) to its integer code.
        penalty = str(penalty).lower()
        try:
            return PENALTY_TYPES[penalty]
        except KeyError:
            raise ValueError("Penalty %s is not supported. " % penalty)

    def _validate_sample_weight(self, sample_weight, n_samples):
        """Set the sample weight array."""
        if sample_weight is None:
            # uniform sample weights
            sample_weight = np.ones(n_samples, dtype=np.float64, order='C')
        else:
            # user-provided array
            sample_weight = np.asarray(sample_weight, dtype=np.float64,
                                       order="C")
        if sample_weight.shape[0] != n_samples:
            raise ValueError("Shapes of X and sample_weight do not match.")
        return sample_weight

    def _allocate_parameter_mem(self, n_classes, n_features, coef_init=None,
                                intercept_init=None):
        """Allocate mem for parameters; initialize if provided.

        Multi-class uses one coef row / intercept entry per class;
        the binary case stores a single flat coef vector and a
        length-1 intercept.
        """
        if n_classes > 2:
            # allocate coef_ for multi-class
            if coef_init is not None:
                coef_init = np.asarray(coef_init, order="C")
                if coef_init.shape != (n_classes, n_features):
                    raise ValueError("Provided coef_ does not match dataset. ")
                self.coef_ = coef_init
            else:
                self.coef_ = np.zeros((n_classes, n_features),
                                      dtype=np.float64, order="C")

            # allocate intercept_ for multi-class
            if intercept_init is not None:
                intercept_init = np.asarray(intercept_init, order="C")
                if intercept_init.shape != (n_classes, ):
                    raise ValueError("Provided intercept_init "
                                     "does not match dataset.")
                self.intercept_ = intercept_init
            else:
                self.intercept_ = np.zeros(n_classes, dtype=np.float64,
                                           order="C")
        else:
            # allocate coef_ for binary problem
            if coef_init is not None:
                coef_init = np.asarray(coef_init, dtype=np.float64,
                                       order="C")
                coef_init = coef_init.ravel()
                if coef_init.shape != (n_features,):
                    raise ValueError("Provided coef_init does not "
                                     "match dataset.")
                self.coef_ = coef_init
            else:
                self.coef_ = np.zeros(n_features, dtype=np.float64, order="C")

            # allocate intercept_ for binary problem
            if intercept_init is not None:
                intercept_init = np.asarray(intercept_init, dtype=np.float64)
                if intercept_init.shape != (1,) and intercept_init.shape != ():
                    raise ValueError("Provided intercept_init "
                                     "does not match dataset.")
                self.intercept_ = intercept_init.reshape(1,)
            else:
                self.intercept_ = np.zeros(1, dtype=np.float64, order="C")
def _check_fit_data(X, y):
    """Ensure ``X`` and ``y`` describe the same number of samples.

    Returns None on success and raises ValueError on a mismatch.
    """
    n_samples, _ = X.shape
    if n_samples == y.shape[0]:
        return
    raise ValueError("Shapes of X and y do not match.")
def _make_dataset(X, y_i, sample_weight):
    """Create ``Dataset`` abstraction for sparse and dense inputs.

    Returns a (dataset, intercept_decay) pair; sparse input gets a CSR
    dataset together with a damped intercept decay to avoid intercept
    oscillation, dense input gets an array dataset with decay 1.0.
    """
    if not sp.issparse(X):
        return ArrayDataset(X, y_i, sample_weight), 1.0
    csr = CSRDataset(X.data, X.indptr, X.indices, y_i, sample_weight)
    return csr, SPARSE_INTERCEPT_DECAY
def _prepare_fit_binary(est, y, i):
    """Set up the one-vs-rest target and parameter slices for class ``i``.

    Returns (y_i, coef, intercept) where ``y_i`` is +1 for samples of
    class ``i`` and -1 otherwise.  In the binary case the estimator's
    single coef row / intercept entry is used regardless of ``i``.
    """
    y_i = np.ones(y.shape, dtype=np.float64, order="C")
    y_i[y != est.classes_[i]] = -1.0

    if len(est.classes_) == 2:
        return y_i, est.coef_.ravel(), est.intercept_[0]
    return y_i, est.coef_[i], est.intercept_[i]
def fit_binary(est, i, X, y, alpha, C, learning_rate, n_iter,
               pos_weight, neg_weight, sample_weight):
    """Fit a single binary classifier.

    The i'th class is considered the "positive" class.  Returns the
    (coef, intercept) pair produced by the compiled plain_sgd routine.
    """
    y_i, coef, intercept = _prepare_fit_binary(est, y, i)
    assert y_i.shape[0] == y.shape[0] == sample_weight.shape[0]
    dataset, intercept_decay = _make_dataset(X, y_i, sample_weight)

    # Resolve string hyper-parameters to the integer codes plain_sgd expects.
    penalty_code = est._get_penalty_type(est.penalty)
    lr_code = est._get_learning_rate_type(learning_rate)

    # XXX should have random_state_!
    # numpy mtrand expects a C long which is a signed 32 bit integer under
    # Windows
    rng = check_random_state(est.random_state)
    seed = rng.randint(0, np.iinfo(np.int32).max)

    return plain_sgd(coef, intercept, est.loss_function,
                     penalty_code, alpha, C, est.l1_ratio,
                     dataset, n_iter, int(est.fit_intercept),
                     int(est.verbose), int(est.shuffle), seed,
                     pos_weight, neg_weight,
                     lr_code, est.eta0,
                     est.power_t, est.t_, intercept_decay)
class BaseSGDClassifier(six.with_metaclass(ABCMeta, BaseSGD,
LinearClassifierMixin)):
loss_functions = {
"hinge": (Hinge, 1.0),
"squared_hinge": (SquaredHinge, 1.0),
"perceptron": (Hinge, 0.0),
"log": (Log, ),
"modified_huber": (ModifiedHuber, ),
"squared_loss": (SquaredLoss, ),
"huber": (Huber, DEFAULT_EPSILON),
"epsilon_insensitive": (EpsilonInsensitive, DEFAULT_EPSILON),
"squared_epsilon_insensitive": (SquaredEpsilonInsensitive,
DEFAULT_EPSILON),
}
@abstractmethod
def __init__(self, loss="hinge", penalty='l2', alpha=0.0001, l1_ratio=0.15,
fit_intercept=True, n_iter=5, shuffle=False, verbose=0,
epsilon=DEFAULT_EPSILON, n_jobs=1, random_state=None,
learning_rate="optimal", eta0=0.0, power_t=0.5,
class_weight=None, warm_start=False):
super(BaseSGDClassifier, self).__init__(loss=loss, penalty=penalty,
alpha=alpha, l1_ratio=l1_ratio,
fit_intercept=fit_intercept,
n_iter=n_iter, shuffle=shuffle,
verbose=verbose,
epsilon=epsilon,
random_state=random_state,
learning_rate=learning_rate,
eta0=eta0, power_t=power_t,
warm_start=warm_start)
self.class_weight = class_weight
self.classes_ = None
self.n_jobs = int(n_jobs)
def _partial_fit(self, X, y, alpha, C,
loss, learning_rate, n_iter,
classes, sample_weight,
coef_init, intercept_init):
X = atleast2d_or_csr(X, dtype=np.float64, order="C")
y = column_or_1d(y, warn=True)
n_samples, n_features = X.shape
_check_fit_data(X, y)
self._validate_params()
_check_partial_fit_first_call(self, classes)
n_classes = self.classes_.shape[0]
# Allocate datastructures from input arguments
y_ind = np.searchsorted(self.classes_, y) # XXX use a LabelBinarizer?
self._expanded_class_weight = compute_class_weight(self.class_weight,
self.classes_,
y_ind)
sample_weight = self._validate_sample_weight(sample_weight, n_samples)
if self.coef_ is None or coef_init is not None:
self._allocate_parameter_mem(n_classes, n_features,
coef_init, intercept_init)
self.loss_function = self._get_loss_function(loss)
if self.t_ is None:
self._init_t(self.loss_function)
# delegate to concrete training procedure
if n_classes > 2:
self._fit_multiclass(X, y, alpha=alpha, C=C,
learning_rate=learning_rate,
sample_weight=sample_weight, n_iter=n_iter)
elif n_classes == 2:
self._fit_binary(X, y, alpha=alpha, C=C,
learning_rate=learning_rate,
sample_weight=sample_weight, n_iter=n_iter)
else:
raise ValueError("The number of class labels must be "
"greater than one.")
self.t_ += n_iter * n_samples
return self
def _fit(self, X, y, alpha, C, loss, learning_rate, coef_init=None,
intercept_init=None, sample_weight=None):
if hasattr(self, "classes_"):
self.classes_ = None
X = atleast2d_or_csr(X, dtype=np.float64, order="C")
n_samples, n_features = X.shape
# labels can be encoded as float, int, or string literals
# np.unique sorts in asc order; largest class id is positive class
classes = np.unique(y)
if self.warm_start and self.coef_ is not None:
if coef_init is None:
coef_init = self.coef_
if intercept_init is None:
intercept_init = self.intercept_
else:
self.coef_ = None
self.intercept_ = None
# Clear iteration count for multiple call to fit.
self.t_ = None
self._partial_fit(X, y, alpha, C, loss, learning_rate, self.n_iter,
classes, sample_weight, coef_init, intercept_init)
return self
def _fit_binary(self, X, y, alpha, C, sample_weight,
learning_rate, n_iter):
"""Fit a binary classifier on X and y. """
coef, intercept = fit_binary(self, 1, X, y, alpha, C,
learning_rate, n_iter,
self._expanded_class_weight[1],
self._expanded_class_weight[0],
sample_weight)
# need to be 2d
self.coef_ = coef.reshape(1, -1)
# intercept is a float, need to convert it to an array of length 1
self.intercept_ = np.atleast_1d(intercept)
def _fit_multiclass(self, X, y, alpha, C, learning_rate,
sample_weight, n_iter):
"""Fit a multi-class classifier by combining binary classifiers
Each binary classifier predicts one class versus all others. This
strategy is called OVA: One Versus All.
"""
# Use joblib to fit OvA in parallel.
result = Parallel(n_jobs=self.n_jobs, backend="threading",
verbose=self.verbose)(
delayed(fit_binary)(self, i, X, y, alpha, C, learning_rate,
n_iter, self._expanded_class_weight[i], 1.,
sample_weight)
for i in range(len(self.classes_)))
for i, (_, intercept) in enumerate(result):
self.intercept_[i] = intercept
def partial_fit(self, X, y, classes=None, sample_weight=None):
"""Fit linear model with Stochastic Gradient Descent.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Subset of the training data
y : numpy array of shape [n_samples]
Subset of the target values
classes : array, shape = [n_classes]
Classes across all calls to partial_fit.
Can be obtained by via `np.unique(y_all)`, where y_all is the
target vector of the entire dataset.
This argument is required for the first call to partial_fit
and can be omitted in the subsequent calls.
Note that y doesn't need to contain all labels in `classes`.
sample_weight : array-like, shape = [n_samples], optional
Weights applied to individual samples.
If not provided, uniform weights are assumed.
Returns
-------
self : returns an instance of self.
"""
return self._partial_fit(X, y, alpha=self.alpha, C=1.0, loss=self.loss,
learning_rate=self.learning_rate, n_iter=1,
classes=classes, sample_weight=sample_weight,
coef_init=None, intercept_init=None)
def fit(self, X, y, coef_init=None, intercept_init=None,
        class_weight=None, sample_weight=None):
    """Fit linear model with Stochastic Gradient Descent.

    Parameters
    ----------
    X : {array-like, sparse matrix}, shape = [n_samples, n_features]
        Training data.

    y : numpy array of shape [n_samples]
        Target values.

    coef_init : array, shape = [n_classes, n_features]
        The initial coefficients to warm-start the optimization.

    intercept_init : array, shape = [n_classes]
        The initial intercept to warm-start the optimization.

    class_weight : dict, {class_label : weight} or "auto" or None, optional
        Deprecated: set ``class_weight`` on the estimator
        (constructor / `set_params`) instead.

    sample_weight : array-like, shape = [n_samples], optional
        Weights applied to individual samples.
        If not provided, uniform weights are assumed.

    Returns
    -------
    self : returns an instance of self.
    """
    if class_weight is not None:
        # Bug fix: this keyword used to be accepted but silently
        # discarded. Warn (the supported spelling is the constructor
        # parameter) and then honor the caller's intent.
        import warnings
        warnings.warn("Passing class_weight to fit() is deprecated; "
                      "set the class_weight attribute on the estimator "
                      "instead (e.g. in the constructor).",
                      DeprecationWarning, stacklevel=2)
        self.class_weight = class_weight
    return self._fit(X, y, alpha=self.alpha, C=1.0,
                     loss=self.loss, learning_rate=self.learning_rate,
                     coef_init=coef_init, intercept_init=intercept_init,
                     sample_weight=sample_weight)
class SGDClassifier(BaseSGDClassifier, _LearntSelectorMixin):
    """Linear classifiers (SVM, logistic regression, a.o.) with SGD training.

    This estimator implements regularized linear models with stochastic
    gradient descent (SGD) learning: the gradient of the loss is estimated
    one sample at a time and the model is updated along the way with a
    decreasing strength schedule (aka learning rate). SGD allows minibatch
    (online/out-of-core) learning via the partial_fit method.

    Dense and sparse arrays of floating point feature values are both
    supported. The fitted model is controlled by the loss parameter; the
    default fits a linear support vector machine (SVM).

    The regularizer is a penalty added to the loss function that shrinks
    model parameters towards the zero vector, using either the squared
    euclidean norm L2, the absolute norm L1, or a combination of both
    (Elastic Net). If a parameter update crosses 0.0 because of the
    regularizer, the update is truncated to 0.0, which allows learning
    sparse models and online feature selection.

    Parameters
    ----------
    loss : str, 'hinge', 'log', 'modified_huber', 'squared_hinge',\
            'perceptron', or a regression loss: 'squared_loss', 'huber',\
            'epsilon_insensitive', or 'squared_epsilon_insensitive'
        The loss function to be used. Defaults to 'hinge', which gives a
        linear SVM. The 'log' loss gives logistic regression, a
        probabilistic classifier. 'modified_huber' is another smooth loss
        that brings tolerance to outliers as well as probability estimates.
        'squared_hinge' is like hinge but is quadratically penalized.
        'perceptron' is the linear loss used by the perceptron algorithm.
        The other losses are designed for regression but can be useful in
        classification as well; see SGDRegressor for a description.

    penalty : str, 'l2' or 'l1' or 'elasticnet'
        The penalty (aka regularization term) to be used. Defaults to 'l2'
        which is the standard regularizer for linear SVM models. 'l1' and
        'elasticnet' might bring sparsity to the model (feature selection)
        not achievable with 'l2'.

    alpha : float
        Constant that multiplies the regularization term. Defaults to 0.0001

    l1_ratio : float
        The Elastic Net mixing parameter, with 0 <= l1_ratio <= 1.
        l1_ratio=0 corresponds to L2 penalty, l1_ratio=1 to L1.
        Defaults to 0.15.

    fit_intercept: bool
        Whether the intercept should be estimated or not. If False, the
        data is assumed to be already centered. Defaults to True.

    n_iter: int, optional
        The number of passes over the training data (aka epochs).
        Defaults to 5.

    shuffle: bool, optional
        Whether or not the training data should be shuffled after each
        epoch. Defaults to False.

    random_state: int seed, RandomState instance, or None (default)
        The seed of the pseudo random number generator to use when
        shuffling the data.

    verbose: integer, optional
        The verbosity level

    epsilon: float
        Epsilon in the epsilon-insensitive loss functions; only if `loss`
        is 'huber', 'epsilon_insensitive', or 'squared_epsilon_insensitive'.
        For 'huber', determines the threshold at which it becomes less
        important to get the prediction exactly right. For
        epsilon-insensitive, any differences between the current prediction
        and the correct label are ignored if they are less than this
        threshold.

    n_jobs: integer, optional
        The number of CPUs to use to do the OVA (One Versus All, for
        multi-class problems) computation. -1 means 'all CPUs'. Defaults
        to 1.

    learning_rate : string, optional
        The learning rate:
        constant: eta = eta0
        optimal: eta = 1.0 / (t + t0) [default]
        invscaling: eta = eta0 / pow(t, power_t)

    eta0 : double
        The initial learning rate for the 'constant' or 'invscaling'
        schedules. The default value is 0.0 as eta0 is not used by the
        default schedule 'optimal'.

    power_t : double
        The exponent for inverse scaling learning rate [default 0.5].

    class_weight : dict, {class_label : weight} or "auto" or None, optional
        Preset for the class_weight fit parameter.
        Weights associated with classes. If not given, all classes
        are supposed to have weight one. The "auto" mode uses the values
        of y to automatically adjust weights inversely proportional to
        class frequencies.

    warm_start : bool, optional
        When set to True, reuse the solution of the previous call to fit
        as initialization, otherwise, just erase the previous solution.

    Attributes
    ----------
    `coef_` : array, shape = [1, n_features] if n_classes == 2 else [n_classes,
    n_features]
        Weights assigned to the features.

    `intercept_` : array, shape = [1] if n_classes == 2 else [n_classes]
        Constants in decision function.

    Examples
    --------
    >>> import numpy as np
    >>> from sklearn import linear_model
    >>> X = np.array([[-1, -1], [-2, -1], [1, 1], [2, 1]])
    >>> Y = np.array([1, 1, 2, 2])
    >>> clf = linear_model.SGDClassifier()
    >>> clf.fit(X, Y)
    ... #doctest: +NORMALIZE_WHITESPACE
    SGDClassifier(alpha=0.0001, class_weight=None, epsilon=0.1, eta0=0.0,
            fit_intercept=True, l1_ratio=0.15, learning_rate='optimal',
            loss='hinge', n_iter=5, n_jobs=1, penalty='l2', power_t=0.5,
            random_state=None, shuffle=False,
            verbose=0, warm_start=False)
    >>> print(clf.predict([[-0.8, -1]]))
    [1]

    See also
    --------
    LinearSVC, LogisticRegression, Perceptron
    """

    def __init__(self, loss="hinge", penalty='l2', alpha=0.0001,
                 l1_ratio=0.15, fit_intercept=True, n_iter=5, shuffle=False,
                 verbose=0, epsilon=DEFAULT_EPSILON, n_jobs=1,
                 random_state=None, learning_rate="optimal", eta0=0.0,
                 power_t=0.5, class_weight=None, warm_start=False):
        # Pure delegation: all hyper-parameters are stored by the base class.
        super(SGDClassifier, self).__init__(
            loss=loss, penalty=penalty, alpha=alpha, l1_ratio=l1_ratio,
            fit_intercept=fit_intercept, n_iter=n_iter, shuffle=shuffle,
            verbose=verbose, epsilon=epsilon, n_jobs=n_jobs,
            random_state=random_state, learning_rate=learning_rate,
            eta0=eta0, power_t=power_t, class_weight=class_weight,
            warm_start=warm_start)

    def _check_proba(self):
        # Probability estimates only exist for losses with a probabilistic
        # interpretation.
        if self.loss not in ("log", "modified_huber"):
            raise AttributeError("probability estimates are not available for"
                                 " loss=%r" % self.loss)

    @property
    def predict_proba(self):
        """Probability estimates.

        This method is only available for log loss and modified Huber loss.

        Multiclass probability estimates are derived from binary
        (one-vs.-rest) estimates by simple normalization, as recommended by
        Zadrozny and Elkan.

        Binary probability estimates for loss="modified_huber" are given by
        (clip(decision_function(X), -1, 1) + 1) / 2.

        Parameters
        ----------
        X : {array-like, sparse matrix}, shape = [n_samples, n_features]

        Returns
        -------
        array, shape = [n_samples, n_classes]
            Returns the probability of the sample for each class in the
            model, where classes are ordered as they are in
            `self.classes_`.

        References
        ----------
        Zadrozny and Elkan, "Transforming classifier scores into multiclass
        probability estimates", SIGKDD'02,
        http://www.research.ibm.com/people/z/zadrozny/kdd2002-Transf.pdf

        The justification for the formula in the loss="modified_huber"
        case is in the appendix B in:
        http://jmlr.csail.mit.edu/papers/volume2/zhang02c/zhang02c.pdf
        """
        # Raises AttributeError for unsupported losses, so the property
        # appears absent rather than returning a broken callable.
        self._check_proba()
        return self._predict_proba

    def _predict_proba(self, X):
        if self.loss == "log":
            return self._predict_proba_lr(X)

        if self.loss != "modified_huber":
            raise NotImplementedError("predict_(log_)proba only supported when"
                                      " loss='log' or loss='modified_huber' "
                                      "(%r given)" % self.loss)

        scores = self.decision_function(X)
        # Zhang's mapping: p = (clip(f(x), -1, 1) + 1) / 2.
        prob = (np.clip(scores, -1, 1) + 1.) / 2.

        if len(self.classes_) == 2:
            # Binary case: one score column; the negative class gets 1 - p.
            return np.vstack((1. - prob, prob)).T

        # One-vs-rest: normalize each row to sum to one. Clipping may have
        # zeroed out every class for a sample; fall back to a uniform
        # distribution for those rows so normalization is well defined.
        prob_sum = prob.sum(axis=1)
        all_zero = (prob_sum == 0)
        if np.any(all_zero):
            prob[all_zero, :] = 1
            prob_sum[all_zero] = len(self.classes_)
        prob /= prob_sum.reshape((prob.shape[0], -1))
        return prob

    @property
    def predict_log_proba(self):
        """Log of probability estimates.

        This method is only available for log loss and modified Huber loss.

        When loss="modified_huber", probability estimates may be hard zeros
        and ones, so taking the logarithm is not possible.

        See ``predict_proba`` for details.

        Parameters
        ----------
        X : array-like, shape = [n_samples, n_features]

        Returns
        -------
        T : array-like, shape = [n_samples, n_classes]
            Returns the log-probability of the sample for each class in the
            model, where classes are ordered as they are in
            `self.classes_`.
        """
        self._check_proba()
        return self._predict_log_proba

    def _predict_log_proba(self, X):
        # Element-wise log; may emit -inf / warnings for hard-zero
        # probabilities (see predict_log_proba docstring).
        return np.log(self.predict_proba(X))
class BaseSGDRegressor(BaseSGD, RegressorMixin):
    """Base class for regressors trained with plain SGD."""

    # Maps a loss name to its concrete loss class plus any extra
    # constructor arguments (the epsilon of the insensitive losses).
    loss_functions = {
        "squared_loss": (SquaredLoss, ),
        "huber": (Huber, DEFAULT_EPSILON),
        "epsilon_insensitive": (EpsilonInsensitive, DEFAULT_EPSILON),
        "squared_epsilon_insensitive": (SquaredEpsilonInsensitive,
                                        DEFAULT_EPSILON),
    }

    @abstractmethod
    def __init__(self, loss="squared_loss", penalty="l2", alpha=0.0001,
                 l1_ratio=0.15, fit_intercept=True, n_iter=5, shuffle=False,
                 verbose=0, epsilon=DEFAULT_EPSILON, random_state=None,
                 learning_rate="invscaling", eta0=0.01, power_t=0.25,
                 warm_start=False):
        # Store everything on the shared SGD base class.
        super(BaseSGDRegressor, self).__init__(
            loss=loss, penalty=penalty, alpha=alpha, l1_ratio=l1_ratio,
            fit_intercept=fit_intercept, n_iter=n_iter, shuffle=shuffle,
            verbose=verbose, epsilon=epsilon, random_state=random_state,
            learning_rate=learning_rate, eta0=eta0, power_t=power_t,
            warm_start=warm_start)

    def _partial_fit(self, X, y, alpha, C, loss, learning_rate,
                     n_iter, sample_weight,
                     coef_init, intercept_init):
        """Run `n_iter` SGD epochs, allocating parameters on first use."""
        X, y = check_arrays(X, y, sparse_format="csr", copy=False,
                            check_ccontiguous=True, dtype=np.float64)
        y = column_or_1d(y, warn=True)
        n_samples, n_features = X.shape
        _check_fit_data(X, y)
        self._validate_params()

        # Normalize / default the per-sample weights.
        sample_weight = self._validate_sample_weight(sample_weight, n_samples)

        # Lazily allocate coef_/intercept_ the first time we see data
        # (regression is a single output, hence the leading 1).
        if self.coef_ is None:
            self._allocate_parameter_mem(1, n_features,
                                         coef_init, intercept_init)

        self._fit_regressor(X, y, alpha, C, loss, learning_rate,
                            sample_weight, n_iter)

        # Advance the global sample counter used by the learning-rate
        # schedules.
        self.t_ += n_iter * n_samples
        return self

    def partial_fit(self, X, y, sample_weight=None):
        """Perform one epoch of Stochastic Gradient Descent on a subset.

        Parameters
        ----------
        X : {array-like, sparse matrix}, shape = [n_samples, n_features]
            Subset of training data.

        y : numpy array of shape [n_samples]
            Subset of target values.

        sample_weight : array-like, shape = [n_samples], optional
            Weights applied to individual samples.
            If not provided, uniform weights are assumed.

        Returns
        -------
        self : returns an instance of self.
        """
        return self._partial_fit(X, y, self.alpha, C=1.0,
                                 loss=self.loss,
                                 learning_rate=self.learning_rate,
                                 n_iter=1,
                                 sample_weight=sample_weight,
                                 coef_init=None, intercept_init=None)

    def _fit(self, X, y, alpha, C, loss, learning_rate, coef_init=None,
             intercept_init=None, sample_weight=None):
        if self.warm_start and self.coef_ is not None:
            # Warm start: reuse the previous solution unless the caller
            # supplied explicit initial values.
            coef_init = self.coef_ if coef_init is None else coef_init
            if intercept_init is None:
                intercept_init = self.intercept_
        else:
            self.coef_ = None
            self.intercept_ = None

        # Reset the iteration counter so repeated fit() calls start the
        # learning-rate schedule from scratch.
        self.t_ = None

        return self._partial_fit(X, y, alpha, C, loss, learning_rate,
                                 self.n_iter, sample_weight,
                                 coef_init, intercept_init)

    def fit(self, X, y, coef_init=None, intercept_init=None,
            sample_weight=None):
        """Fit linear model with Stochastic Gradient Descent.

        Parameters
        ----------
        X : {array-like, sparse matrix}, shape = [n_samples, n_features]
            Training data.

        y : numpy array of shape [n_samples]
            Target values.

        coef_init : array, shape = [n_features]
            The initial coefficients to warm-start the optimization.

        intercept_init : array, shape = [1]
            The initial intercept to warm-start the optimization.

        sample_weight : array-like, shape = [n_samples], optional
            Weights applied to individual samples (1. for unweighted).

        Returns
        -------
        self : returns an instance of self.
        """
        return self._fit(X, y, alpha=self.alpha, C=1.0,
                         loss=self.loss, learning_rate=self.learning_rate,
                         coef_init=coef_init,
                         intercept_init=intercept_init,
                         sample_weight=sample_weight)

    def decision_function(self, X):
        """Predict using the linear model.

        Parameters
        ----------
        X : {array-like, sparse matrix}, shape = [n_samples, n_features]

        Returns
        -------
        array, shape = [n_samples]
           Predicted target values per element in X.
        """
        X = atleast2d_or_csr(X)
        raw = safe_sparse_dot(X, self.coef_.T, dense_output=True)
        return (raw + self.intercept_).ravel()

    def predict(self, X):
        """Predict using the linear model.

        Parameters
        ----------
        X : {array-like, sparse matrix}, shape = [n_samples, n_features]

        Returns
        -------
        array, shape = [n_samples]
           Predicted target values per element in X.
        """
        # For regression the raw linear output *is* the prediction.
        return self.decision_function(X)

    def _fit_regressor(self, X, y, alpha, C, loss, learning_rate,
                       sample_weight, n_iter):
        """Drive the Cython plain_sgd solver for `n_iter` epochs."""
        dataset, intercept_decay = _make_dataset(X, y, sample_weight)

        loss_fn = self._get_loss_function(loss)
        penalty_kind = self._get_penalty_type(self.penalty)
        schedule_kind = self._get_learning_rate_type(learning_rate)

        if self.t_ is None:
            self._init_t(loss_fn)

        rng = check_random_state(self.random_state)
        # numpy mtrand expects a C long, which is a signed 32-bit integer
        # under Windows -- hence the int32 cap on the derived seed.
        seed = rng.randint(0, np.iinfo(np.int32).max)

        self.coef_, intercept = plain_sgd(self.coef_,
                                          self.intercept_[0],
                                          loss_fn,
                                          penalty_kind,
                                          alpha, C,
                                          self.l1_ratio,
                                          dataset,
                                          n_iter,
                                          int(self.fit_intercept),
                                          int(self.verbose),
                                          int(self.shuffle),
                                          seed,
                                          1.0, 1.0,
                                          schedule_kind,
                                          self.eta0, self.power_t, self.t_,
                                          intercept_decay)

        # plain_sgd returns a scalar intercept; store it as a 1-element
        # array for API consistency with the classifier.
        self.intercept_ = np.atleast_1d(intercept)
class SGDRegressor(BaseSGDRegressor, _LearntSelectorMixin):
    """Linear model fitted by minimizing a regularized empirical loss with SGD

    SGD stands for Stochastic Gradient Descent: the gradient of the loss is
    estimated one sample at a time and the model is updated along the way
    with a decreasing strength schedule (aka learning rate).

    The regularizer is a penalty added to the loss function that shrinks
    model parameters towards the zero vector, using either the squared
    euclidean norm L2, the absolute norm L1, or a combination of both
    (Elastic Net). If a parameter update crosses 0.0 because of the
    regularizer, the update is truncated to 0.0, which allows learning
    sparse models and online feature selection.

    This implementation works with data represented as dense numpy arrays
    of floating point values for the features.

    Parameters
    ----------
    loss : str, 'squared_loss', 'huber', 'epsilon_insensitive', \
            or 'squared_epsilon_insensitive'
        The loss function to be used. Defaults to 'squared_loss' which
        refers to the ordinary least squares fit. 'huber' modifies
        'squared_loss' to focus less on getting outliers correct by
        switching from squared to linear loss past a distance of epsilon.
        'epsilon_insensitive' ignores errors less than epsilon and is
        linear past that; this is the loss function used in SVR.
        'squared_epsilon_insensitive' is the same but becomes squared loss
        past a tolerance of epsilon.

    penalty : str, 'l2' or 'l1' or 'elasticnet'
        The penalty (aka regularization term) to be used. Defaults to 'l2'
        which is the standard regularizer for linear SVM models. 'l1' and
        'elasticnet' might bring sparsity to the model (feature selection)
        not achievable with 'l2'.

    alpha : float
        Constant that multiplies the regularization term. Defaults to 0.0001

    l1_ratio : float
        The Elastic Net mixing parameter, with 0 <= l1_ratio <= 1.
        l1_ratio=0 corresponds to L2 penalty, l1_ratio=1 to L1.
        Defaults to 0.15.

    fit_intercept: bool
        Whether the intercept should be estimated or not. If False, the
        data is assumed to be already centered. Defaults to True.

    n_iter: int, optional
        The number of passes over the training data (aka epochs).
        Defaults to 5.

    shuffle: bool, optional
        Whether or not the training data should be shuffled after each
        epoch. Defaults to False.

    random_state: int seed, RandomState instance, or None (default)
        The seed of the pseudo random number generator to use when
        shuffling the data.

    verbose: integer, optional
        The verbosity level.

    epsilon: float
        Epsilon in the epsilon-insensitive loss functions; only if `loss`
        is 'huber', 'epsilon_insensitive', or 'squared_epsilon_insensitive'.
        For 'huber', determines the threshold at which it becomes less
        important to get the prediction exactly right. For
        epsilon-insensitive, any differences between the current prediction
        and the correct label are ignored if they are less than this
        threshold.

    learning_rate : string, optional
        The learning rate:
        constant: eta = eta0
        optimal: eta = 1.0/(t+t0)
        invscaling: eta = eta0 / pow(t, power_t) [default]

    eta0 : double, optional
        The initial learning rate [default 0.01].

    power_t : double, optional
        The exponent for inverse scaling learning rate [default 0.25].

    warm_start : bool, optional
        When set to True, reuse the solution of the previous call to fit
        as initialization, otherwise, just erase the previous solution.

    Attributes
    ----------
    `coef_` : array, shape = [n_features]
        Weights assigned to the features.

    `intercept_` : array, shape = [1]
        The intercept term.

    Examples
    --------
    >>> import numpy as np
    >>> from sklearn import linear_model
    >>> n_samples, n_features = 10, 5
    >>> np.random.seed(0)
    >>> y = np.random.randn(n_samples)
    >>> X = np.random.randn(n_samples, n_features)
    >>> clf = linear_model.SGDRegressor()
    >>> clf.fit(X, y)
    SGDRegressor(alpha=0.0001, epsilon=0.1, eta0=0.01, fit_intercept=True,
           l1_ratio=0.15, learning_rate='invscaling', loss='squared_loss',
           n_iter=5, penalty='l2', power_t=0.25, random_state=None,
           shuffle=False, verbose=0, warm_start=False)

    See also
    --------
    Ridge, ElasticNet, Lasso, SVR
    """

    def __init__(self, loss="squared_loss", penalty="l2", alpha=0.0001,
                 l1_ratio=0.15, fit_intercept=True, n_iter=5, shuffle=False,
                 verbose=0, epsilon=DEFAULT_EPSILON, random_state=None,
                 learning_rate="invscaling", eta0=0.01, power_t=0.25,
                 warm_start=False):
        # Thin wrapper: all parameter handling lives in BaseSGDRegressor.
        super(SGDRegressor, self).__init__(
            loss=loss, penalty=penalty, alpha=alpha, l1_ratio=l1_ratio,
            fit_intercept=fit_intercept, n_iter=n_iter, shuffle=shuffle,
            verbose=verbose, epsilon=epsilon, random_state=random_state,
            learning_rate=learning_rate, eta0=eta0, power_t=power_t,
            warm_start=warm_start)
# NOTE(review): removed stray dataset-viewer UI text ("Subsets and Splits /
# No community queries yet / ...") that was accidentally appended here --
# it was not valid Python and broke parsing of the module.