hexsha stringlengths 40 40 | size int64 1 1.03M | ext stringclasses 10 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 3 239 | max_stars_repo_name stringlengths 5 130 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 239 | max_issues_repo_name stringlengths 5 130 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 239 | max_forks_repo_name stringlengths 5 130 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 1 1.03M | avg_line_length float64 1 958k | max_line_length int64 1 1.03M | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
acf09a9808b1a754a0359d951b223b1f5ba0e743 | 9,513 | py | Python | spyne/test/interop/server/_service.py | leobispo/spyne | c80c9c797869df048bbe74ed9cba63972531009c | [
"BSD-3-Clause"
] | 1 | 2018-02-27T08:02:40.000Z | 2018-02-27T08:02:40.000Z | spyne/test/interop/server/_service.py | leobispo/spyne | c80c9c797869df048bbe74ed9cba63972531009c | [
"BSD-3-Clause"
] | null | null | null | spyne/test/interop/server/_service.py | leobispo/spyne | c80c9c797869df048bbe74ed9cba63972531009c | [
"BSD-3-Clause"
] | null | null | null |
#
# spyne - Copyright (C) Spyne contributors.
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301
#
from spyne.model.binary import Attachment
from spyne.model.complex import Array
from spyne.model.complex import ComplexModel
from spyne.model.complex import SelfReference
from spyne.model.enum import Enum
from spyne.model.fault import Fault
from spyne.model.primitive import AnyXml
from spyne.model.primitive import AnyDict
from spyne.model.primitive import Boolean
from spyne.model.primitive import DateTime
from spyne.model.primitive import Float
from spyne.model.primitive import Integer
from spyne.model.primitive import Duration
from spyne.model.primitive import String
from spyne.model.primitive import Double
from spyne.service import ServiceBase
from spyne.decorator import rpc
from spyne.decorator import srpc
from datetime import datetime
import logging
logger = logging.getLogger(__name__)
class SimpleClass(ComplexModel):
    """Minimal complex type (one integer, one string) used by the echo services."""
    i = Integer
    s = String
class DocumentedFault(Fault):
    """Fault whose code, string and actor are fixed at construction time.

    Raised by the interop exception service to exercise documented faults.
    """

    def __init__(self):
        super(DocumentedFault, self).__init__(
            faultcode="Documented",
            faultstring="A documented fault",
            faultactor='http://faultactor.example.com',
        )
class OtherClass(ComplexModel):
    """Complex type mixing datetime, double and boolean members."""
    dt = DateTime
    d = Double
    b = Boolean
class ClassWithSelfReference(ComplexModel):
    """Complex type whose 'sr' member recursively references this same type."""
    i = Integer
    sr = SelfReference
class NestedClass(ComplexModel):
    """Complex type aggregating primitives, arrays and another complex type."""
    __namespace__ = "punk.tunk"

    simple = Array(SimpleClass)
    s = String
    i = Integer
    f = Float
    other = OtherClass
    ai = Array(Integer)
class NonNillableClass(ComplexModel):
    """Complex type whose members are all declared required / non-nillable."""
    __namespace__ = "hunk.sunk"
    nillable = False
    min_occurs = 1

    dt = DateTime(min_occurs=1, nillable=False)
    i = Integer(nillable=False)
    s = String(min_len=1, nillable=False)
class ExtensionClass(NestedClass):
    """Extends NestedClass with extra members (exercises schema extension)."""
    __namespace__ = "bar"

    p = NonNillableClass
    l = DateTime  # noqa: E741 -- single-letter name kept for wire compatibility
    q = Integer
# Enumeration of week-day names echoed by InteropPrimitive.echo_enum.
# NOTE: the previous listing skipped 'Thursday'; restored here so the enum
# covers all seven days.  Adding a value is backward compatible -- every
# previously valid value is still valid.
DaysOfWeekEnum = Enum(
    'Monday',
    'Tuesday',
    'Wednesday',
    'Thursday',
    'Friday',
    'Saturday',
    'Sunday',
    type_name='DaysOfWeekEnum'
)
class InHeader(ComplexModel):
    """SOAP input header carrying a string and an integer."""
    __namespace__ = "spyne.test.interop.server"
    s = String
    i = Integer
class OutHeader(ComplexModel):
    """SOAP output header carrying a datetime and a float."""
    __namespace__ = "spyne.test.interop.server"
    dt = DateTime
    f = Float
class InTraceHeader(ComplexModel):
    """Second SOAP input header used by the complex-header tests."""
    __namespace__ = "spyne.test.interop.server"
    client = String
    callDate = DateTime
class OutTraceHeader(ComplexModel):
    """Second SOAP output header used by the complex-header tests."""
    __namespace__ = "spyne.test.interop.server"
    receiptDate = DateTime
    returnDate = DateTime
class InteropServiceWithHeader(ServiceBase):
    """Service exercising a single SOAP input header and output header."""
    __in_header__ = InHeader
    __out_header__ = OutHeader

    @rpc(_returns=InHeader)
    def echo_in_header(ctx):
        # Echo the parsed input header back to the caller unchanged.
        return ctx.in_header

    @rpc(_returns=OutHeader)
    def send_out_header(ctx):
        # Populate a fixed output header so clients can verify header
        # round-tripping against known values.
        ctx.out_header = OutHeader()
        ctx.out_header.dt = datetime(year=2000, month=1, day=1)
        ctx.out_header.f = 3.141592653
        return ctx.out_header
class InteropServiceWithComplexHeader(ServiceBase):
    """Service exercising tuples of SOAP input and output headers."""
    __in_header__ = (InHeader, InTraceHeader)
    __out_header__ = (OutHeader, OutTraceHeader)

    @rpc(_returns=(InHeader, InTraceHeader))
    def echo_in_complex_header(ctx):
        # Echo both parsed input headers back unchanged.
        return ctx.in_header

    @rpc(_returns=(OutHeader, OutTraceHeader))
    def send_out_complex_header(ctx):
        # Build two fixed output headers; the trace timestamps differ only
        # in the microsecond field (1 vs 100).
        out_header = OutHeader()
        out_header.dt = datetime(year=2000, month=1, day=1)
        out_header.f = 3.141592653
        out_trace_header = OutTraceHeader()
        out_trace_header.receiptDate = datetime(year=2000, month=1, day=1,
                hour=1, minute=1, second=1, microsecond=1)
        out_trace_header.returnDate = datetime(year=2000, month=1, day=1,
                hour=1, minute=1, second=1, microsecond=100)
        ctx.out_header = (out_header, out_trace_header)
        return ctx.out_header
class InteropPrimitive(ServiceBase):
    """Echo services for primitive types (round-trip serialization tests)."""

    @srpc(AnyXml, _returns=AnyXml)
    def echo_any(xml):
        return xml

    @srpc(AnyDict, _returns=AnyDict)
    def echo_any_as_dict(xml_as_dict):
        return xml_as_dict

    @srpc(Integer, _returns=Integer)
    def echo_integer(i):
        return i

    @srpc(String, _returns=String)
    def echo_string(s):
        return s

    @srpc(DateTime, _returns=DateTime)
    def echo_datetime(dt):
        return dt

    # The bogus format kwarg exercises handling of an invalid DateTime format
    # string (per the method name).
    @srpc(DateTime(format='ignored'), _returns=DateTime)
    def echo_datetime_with_invalid_format(dt):
        return dt

    @srpc(Float, _returns=Float)
    def echo_float(f):
        return f

    @srpc(Double, _returns=Double)
    def echo_double(f):
        return f

    @srpc(Boolean, _returns=Boolean)
    def echo_boolean(b):
        return b

    @srpc(DaysOfWeekEnum, _returns=DaysOfWeekEnum)
    def echo_enum(day):
        return day

    @srpc(Duration, _returns=Duration)
    def echo_duration(dur):
        return dur
class InteropArray(ServiceBase):
    """Echo services for arrays of primitive types."""

    @srpc(Array(Integer), _returns=Array(Integer))
    def echo_integer_array(ia):
        return ia

    @srpc(Array(String), _returns=Array(String))
    def echo_string_array(sa):
        return sa

    @srpc(Array(DateTime), _returns=Array(DateTime))
    def echo_date_time_array(dta):
        return dta

    @srpc(Array(Float), _returns=Array(Float))
    def echo_float_array(fa):
        return fa

    @srpc(Array(Double), _returns=Array(Double))
    def echo_double_array(da):
        return da

    @srpc(Array(Boolean), _returns=Array(Boolean))
    def echo_boolean_array(ba):
        return ba

    # Uses max_occurs instead of Array() -- a "bare" repeating element.
    @srpc(Boolean(max_occurs="unbounded"), _returns=Boolean(max_occurs="unbounded"))
    def echo_simple_boolean_array(ba):
        return ba

    @srpc(Array(Boolean), _returns=Array(Array(Boolean)))
    def echo_array_in_array(baa):
        return baa
class InteropClass(ServiceBase):
    """Echo services for complex types, nesting, inheritance and attachments."""

    @srpc(SimpleClass, _returns=SimpleClass)
    def echo_simple_class(sc):
        return sc

    @srpc(Array(SimpleClass), _returns=Array(SimpleClass))
    def echo_simple_class_array(sca):
        return sca

    @srpc(NestedClass, _returns=NestedClass)
    def echo_nested_class(nc):
        return nc

    @srpc(Array(NestedClass), _returns=Array(NestedClass))
    def echo_nested_class_array(nca):
        return nca

    @srpc(ExtensionClass, _returns=ExtensionClass)
    def echo_extension_class(nc):
        return nc

    @srpc(ClassWithSelfReference, _returns=ClassWithSelfReference)
    def echo_class_with_self_reference(sr):
        return sr

    @srpc(Attachment, _returns=Attachment)
    def echo_attachment(a):
        # Sanity-check that deserialization produced an Attachment instance.
        assert isinstance(a, Attachment)
        return a

    @srpc(Array(Attachment), _returns=Array(Attachment))
    def echo_attachment_array(aa):
        return aa
class InteropException(ServiceBase):
    """Services that deliberately raise, to test fault serialization."""

    @srpc()
    def python_exception(self):
        # Plain Python exception -- should be converted to a generic fault.
        raise Exception("Possible")

    @srpc()
    def soap_exception(self):
        # Explicit spyne Fault with code, string and actor.
        raise Fault("Plausible", "A plausible fault", 'http://faultactor.example.com')

    # _throws advertises the fault type in the generated interface document.
    @srpc(_throws=DocumentedFault)
    def documented_exception(self):
        raise DocumentedFault()
class InteropMisc(ServiceBase):
    """Grab-bag of edge cases: multiple return values, huge payloads,
    renamed operations and deliberately invalid data."""

    # Multiple named return values, including an inline single-member enum.
    @srpc(
        _returns=[
            Integer,
            String,
            Integer,
            Array(Enum("MEMBER", type_name="RoleEnum"))
        ],
        _out_variable_names=[
            'resultCode',
            'resultDescription',
            'transactionId',
            'roles'
        ]
    )
    def complex_return():
        return [1, "Test", 123, ["MEMBER"]]

    @srpc(_returns=Integer)
    def huge_number():
        # 2**100000 -- a very large integer, tests big-int serialization.
        return 2**int(1e5)

    @srpc(_returns=String)
    def long_string():
        # 16 * 16384 = 262144 characters.
        return ('0123456789abcdef' * 16384)

    @srpc()
    def test_empty():
        pass

    @srpc(String, Integer, DateTime)
    def multi_param(s, i, dt):
        pass

    @srpc(NonNillableClass, _returns=String)
    def non_nillable(n):
        return "OK"

    # Exposed on the wire as "do_something" despite the Python name.
    @srpc(String, _returns=String, _public_name="do_something")
    def do_something_else(s):
        return s

    @srpc(Integer, _returns=Array(OtherClass))
    def return_other_class_array(num):
        # Generator return -- spyne consumes the iterable as the array value.
        for i in range(num):
            yield OtherClass(dt=datetime(2010, 12, 6), d=3.0, b=True)

    @srpc(_returns=Attachment)
    def return_binary_data():
        # All 256 byte values, to exercise binary-safe transport.
        return Attachment(data=''.join([chr(i) for i in range(256)]))

    @srpc(_returns=Integer)
    def return_invalid_data():
        # Intentionally returns a string where an Integer is declared.
        return 'a'

    # Fully customized operation/message/variable names.
    @srpc(String,
        _public_name="urn:#getCustomMessages",
        _in_message="getCustomMessagesMsgIn",
        _out_message="getCustomMessagesMsgOut",
        _out_variable_name="CustomMessages",
        _returns=String)
    def custom_messages(s):
        return s
# Aggregate list of the interop service classes defined above.
services = [
    InteropPrimitive,
    InteropArray,
    InteropClass,
    InteropMisc,
    InteropServiceWithHeader,
    InteropServiceWithComplexHeader,
    InteropException,
]
| 26.063014 | 86 | 0.675917 |
acf09b75c980213e0db87c7e36ddc387244464a8 | 1,368 | py | Python | tools/c7n_gcp/setup.py | anastasiia-zolochevska/cloud-custodian | f25315a01bec808c16ab0e2d433d6151cf5769e4 | [
"Apache-2.0"
] | null | null | null | tools/c7n_gcp/setup.py | anastasiia-zolochevska/cloud-custodian | f25315a01bec808c16ab0e2d433d6151cf5769e4 | [
"Apache-2.0"
] | null | null | null | tools/c7n_gcp/setup.py | anastasiia-zolochevska/cloud-custodian | f25315a01bec808c16ab0e2d433d6151cf5769e4 | [
"Apache-2.0"
] | null | null | null | # Copyright 2018 Capital One Services, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from setuptools import setup, find_packages
# Package definition for the c7n_gcp plugin; registers the GCP resource
# provider with Cloud Custodian via the "custodian.resources" entry point.
setup(
    name="c7n_gcp",
    version='0.3.1',
    description="Cloud Custodian - Multi Account",
    classifiers=[
        "Topic :: System :: Systems Administration",
        "Topic :: System :: Distributed Computing"
    ],
    url="https://github.com/cloud-custodian/cloud-custodian",
    license="Apache-2.0",
    packages=find_packages(),
    entry_points={
        "custodian.resources": [
            'gcp = c7n_gcp.entry:initialize_gcp']
    },
    install_requires=[
        "c7n", "click",
        "ratelimiter", "retrying",
        "google-api-python-client>=1.7.3",
        "google-auth-httplib2>=0.0.3",
        "google-auth>=1.4.1",
        "google-cloud-logging>=1.6.0",
        "google-cloud-monitoring>=0.3.0"
    ]
)
| 32.571429 | 74 | 0.665936 |
acf09c6fb02aec75a741963058f89b2d540f2777 | 1,367 | py | Python | heavenLi_pyopengl/hliUIutils/drawImage.py | iyr/heavenli | 90f2786b0a8934302910f2214e71ee851e678baa | [
"BSD-3-Clause"
] | null | null | null | heavenLi_pyopengl/hliUIutils/drawImage.py | iyr/heavenli | 90f2786b0a8934302910f2214e71ee851e678baa | [
"BSD-3-Clause"
] | null | null | null | heavenLi_pyopengl/hliUIutils/drawImage.py | iyr/heavenli | 90f2786b0a8934302910f2214e71ee851e678baa | [
"BSD-3-Clause"
] | null | null | null | # Helper to simplify drawing images from disk
def drawImage(
        imagePath,
        gx,
        gy,
        ao,
        scale,
        w2h,
        shape,
        color,
        refresh
        ):
    """Draw an image from disk as either a square or a circle.

    Pixel data is only (re)loaded from disk when no cached draw call exists
    for imagePath or when refresh is truthy; otherwise an empty pixel list
    and zero resolution are passed through so the cached call is reused.

    NOTE(review): gx, gy, ao and scale are accepted but ignored -- position,
    angle and scale are hard-coded to (0.0, 0.0), 0.0 and 0.75 below,
    matching the previous behavior.  TODO: confirm whether these parameters
    should be forwarded instead.
    """
    flat_arr_list = []
    xRes = 0
    yRes = 0

    # Avoid unneeded conversion computation
    if (not doesDrawCallExist(imagePath)
        or
        refresh):
        img = Image.open(imagePath).convert('RGBA')
        flat_arr_list = np.array(img).ravel().tolist()
        xRes, yRes = img.size

    # The original "circle" and fallback branches were byte-identical, so
    # anything that is not a square is drawn as a circle.
    drawCall = drawImageSquare if shape == "square" else drawImageCircle
    drawCall(
        imagePath,
        flat_arr_list,
        0.0, 0.0,
        0.0,
        0.75,
        xRes, yRes,
        w2h,
        color
        )
    return
| 21.359375 | 51 | 0.375274 |
acf09cce6a17bdb0135c82552c6d378e8aba690d | 5,203 | py | Python | fudge/covariances/test/test_covarianceSuite.py | brown170/fudge | 4f818b0e0b0de52bc127dd77285b20ce3568c97a | [
"BSD-3-Clause"
] | 14 | 2019-08-29T23:46:24.000Z | 2022-03-21T10:16:25.000Z | fudge/covariances/test/test_covarianceSuite.py | brown170/fudge | 4f818b0e0b0de52bc127dd77285b20ce3568c97a | [
"BSD-3-Clause"
] | 1 | 2020-08-04T16:14:45.000Z | 2021-12-01T01:54:34.000Z | fudge/covariances/test/test_covarianceSuite.py | brown170/fudge | 4f818b0e0b0de52bc127dd77285b20ce3568c97a | [
"BSD-3-Clause"
] | 2 | 2022-03-03T22:41:41.000Z | 2022-03-03T22:54:43.000Z | #! /usr/bin/env python3
# encoding: utf-8
# <<BEGIN-copyright>>
# Copyright 2021, Lawrence Livermore National Security, LLC.
# See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: BSD-3-Clause
# <<END-copyright>>
"""
test fudge/covariances/
dbrown, 12/5/2012
"""
import unittest, os
from fudge.covariances import *
from fudge.covariances.covarianceSuite import readXML as CovReadXML
from fudge.reactionSuite import readXML as RxnReadXML
from xData import formatVersion as formatVersionModule
TEST_DATA_PATH, this_filename = os.path.split(__file__)

# Load the Fe-56 evaluation and its covariance data once at import time;
# every test case below reuses these module-level fixtures.
FeEvaluation = RxnReadXML( TEST_DATA_PATH+os.sep+'n-026_Fe_056-endfbvii.1.endf.gnds.xml')
FeCovariance = CovReadXML( TEST_DATA_PATH+os.sep+'n-026_Fe_056-endfbvii.1.endf.gndsCov.xml', reactionSuite=FeEvaluation )
class TestCaseBase(unittest.TestCase):
    """Shared helpers for the covariance test cases."""

    def assertXMLListsEqual(self, x1, x2):
        """Assert two XML line collections are equal, ignoring line order
        and surrounding whitespace; embedded newlines split entries into
        multiple lines first."""

        def flatten(lines):
            out = []
            for entry in lines:
                out.extend(part.strip() for part in entry.split('\n'))
            return out

        # Python 3 has assertCountEqual; fall back to the Python 2 name.
        compare = getattr(self, 'assertCountEqual', None)
        if compare is None:
            compare = getattr(self, 'assertItemsEqual')
        return compare(flatten(x1), flatten(x2))
class Test_covarianceSuite( TestCaseBase ):
    """Exercise fudge.covariances.covarianceSuite against the Fe-56 fixture.

    Many tests are currently placeholders (pass); the populated ones check
    parsing, section indexing and XML serialization.
    """

    def setUp(self):
        # Reuse the module-level covariance fixture for every test.
        self.covSuite = FeCovariance

    def test_readXML(self):
        '''Also tests __init__ & parseXMLNode'''
        self.assertIsInstance( self.covSuite, covarianceSuite.covarianceSuite )

    def test__getitem__(self):
        # Expected serialization of the third section ("nonelastic").
        answer = """<covarianceSection label="nonelastic">
<rowData ENDF_MFMT="33,3" href="$reactions#/reactionSuite/sums/crossSectionSums/crossSectionSum[@label='nonelastic']/crossSection/resonancesWithBackground[@label='eval']"/>
<mixed label="eval">
<sum label="0" domainMin="1e-05" domainMax="862270.0" domainUnit="eV">
<!-- The matrix for this reaction equals the weighted sum of the following matrices: -->
<summand ENDF_MFMT="33,102" coefficient="1.0" href="/covarianceSuite/covarianceSections/covarianceSection[@label='Fe57 + photon']/mixed[@label='eval']"/></sum>
<covarianceMatrix label="1" type="relative">
<gridded2d>
<axes>
<grid index="2" label="row_energy_bounds" unit="eV" style="boundaries">
<values>1e-5 862270 4e6 2e7</values></grid>
<grid index="1" label="column_energy_bounds" unit="eV" style="link">
<link href="../../grid[@index='2']/values"/></grid>
<axis index="0" label="matrix_elements" unit=""/></axes>
<array shape="3,3" compression="diagonal">
<values>0 4e-4 9e-4</values></array></gridded2d></covarianceMatrix>
<covarianceMatrix label="2" type="relative">
<gridded2d>
<axes>
<grid index="2" label="row_energy_bounds" unit="eV" style="boundaries">
<values>1e-5 862270 1e6 2e6 4e6 6e6 8e6 1.4e7 2e7</values></grid>
<grid index="1" label="column_energy_bounds" unit="eV" style="link">
<link href="../../grid[@index='2']/values"/></grid>
<axis index="0" label="matrix_elements" unit=""/></axes>
<array shape="8,8" compression="diagonal">
<values>0 1.584e-3 0.025344 1.584e-3 1.584e-3 1.584e-3 1.584e-3 1.584e-3</values></array></gridded2d></covarianceMatrix>
<shortRangeSelfScalingVariance label="3" type="absolute" dependenceOnProcessedGroupWidth="inverse">
<gridded2d>
<axes>
<grid index="2" label="row_energy_bounds" unit="eV" style="boundaries">
<values>1e-5 862270 1e6 2e6 4e6 6e6 8e6 1.4e7 2e7</values></grid>
<grid index="1" label="column_energy_bounds" unit="eV" style="link">
<link href="../../grid[@index='2']/values"/></grid>
<axis index="0" label="matrix_elements" unit="b**2"/></axes>
<array shape="8,8" compression="diagonal">
<values>0 4.84e-10 1.357e-5 9.8057e-6 2.6915e-5 3.3173e-5 3.2769e-5 3.0954e-5</values></array></gridded2d></shortRangeSelfScalingVariance></mixed></covarianceSection>""".split('\n')
        self.maxDiff = None
        self.assertXMLListsEqual( self.covSuite.covarianceSections[2].toXMLList(formatVersion=formatVersionModule.gnds2_0), answer )

    def test__len__(self):
        # The fixture is expected to contain exactly 60 covariance sections.
        self.assertEqual( len(self.covSuite.covarianceSections),60)

    def test_addSection(self):
        pass

    def test_addModelParameterCovariance(self):
        pass

    def test_addReactionSum(self):
        pass

    def test_addExternalReaction(self):
        pass

    def test_saveToOpenedFile( self):
        pass

    def test_saveToFile( self):
        pass

    def test_check( self ):
        pass

    def test_fix( self ):
        pass

    def test_removeExtraZeros(self):
        pass

    def test_toXMLList(self):
        '''Tested already in test__getitem__'''
        pass

    def test_toENDF6(self):
        pass
if __name__=="__main__":
    # Allow running this module directly as a test script.
    unittest.main()
| 39.416667 | 193 | 0.646934 |
acf09d37393351d68d2a9dee99a83f5d95be3a90 | 209 | py | Python | batch/batch/front_end/__main__.py | MariusDanner/hail | 5ca0305f8243b5888931b1afaa1fbfb617dee097 | [
"MIT"
] | 2 | 2020-12-15T21:20:24.000Z | 2020-12-21T19:46:26.000Z | batch/batch/front_end/__main__.py | MariusDanner/hail | 5ca0305f8243b5888931b1afaa1fbfb617dee097 | [
"MIT"
] | 3 | 2017-06-16T18:10:45.000Z | 2017-07-21T17:44:13.000Z | batch/batch/front_end/__main__.py | MariusDanner/hail | 5ca0305f8243b5888931b1afaa1fbfb617dee097 | [
"MIT"
] | 2 | 2020-07-28T18:55:19.000Z | 2020-10-19T16:43:03.000Z | from hailtop.hail_logging import configure_logging
# configure logging before importing anything else
configure_logging()

# Imported late (not at top of file) on purpose, so the application's
# loggers pick up the handlers installed above.
from .front_end import run  # noqa: E402 pylint: disable=wrong-import-position

run()
| 26.125 | 78 | 0.818182 |
acf09d8942761ce92a838957e1011f8e949b5a7f | 111 | py | Python | api/v1/generics/__init__.py | blockomat2100/vulnman | 835ff3aae1168d8e2fa5556279bc86efd2e46472 | [
"MIT"
] | null | null | null | api/v1/generics/__init__.py | blockomat2100/vulnman | 835ff3aae1168d8e2fa5556279bc86efd2e46472 | [
"MIT"
] | 23 | 2021-12-01T10:00:38.000Z | 2021-12-11T11:43:13.000Z | api/v1/generics/__init__.py | blockomat2100/vulnman | 835ff3aae1168d8e2fa5556279bc86efd2e46472 | [
"MIT"
] | null | null | null | from api.v1.generics.agents import AgentModelViewSet
from api.v1.generics.session import ProjectSessionViewSet
| 37 | 57 | 0.873874 |
acf09d8d161abc38b8103e899fb580020e3db61d | 3,574 | py | Python | storm_control/sc_hardware_backup/physikInstrumente/piPD72Z2x.py | BogdanBintu/MERFISH8 | f854b72ffbfc6c24093dd0a7504790b6aec4eb4d | [
"MIT"
] | null | null | null | storm_control/sc_hardware_backup/physikInstrumente/piPD72Z2x.py | BogdanBintu/MERFISH8 | f854b72ffbfc6c24093dd0a7504790b6aec4eb4d | [
"MIT"
] | null | null | null | storm_control/sc_hardware_backup/physikInstrumente/piPD72Z2x.py | BogdanBintu/MERFISH8 | f854b72ffbfc6c24093dd0a7504790b6aec4eb4d | [
"MIT"
] | null | null | null | import storm_control.sc_hardware.nationalInstruments.nicontrol as nicontrol
import sys
sys.path.append(r'C:\Software\microscope\PIPython-1.3.5.37')
from copy import deepcopy
import storm_control.sc_library.parameters as params
import storm_control.sc_hardware.baseClasses.voltageZModule as voltageZModule
from pipython import GCSDevice, pitools
CONTROLLERNAME = "E-709.CRG"
# STAGES = ['P-725.2CD'] # name in piMicroMove
SERIALNUM='120031914'
class piPD72Z2x(voltageZModule.VoltageZ):
    """Z-stage module for a Physik Instrumente piezo driven by an E-709
    controller, connected over USB through PIPython's GCSDevice."""

    def __init__(self, module_params=None, qt_settings=None, **kwds):
        super().__init__(module_params, qt_settings, **kwds)
        ## __init__
        # @param board The DAQ board to use.
        # @param line The analog output line to use
        # @param scale (Optional) Conversion from microns to volts (default is 10.0/250.0)
        print(f"Serial number: {SERIALNUM}")

        # Connect to piezo
        pidevice = GCSDevice(CONTROLLERNAME)
        pidevice.ConnectUSB(SERIALNUM)
        print("Connected: {}".format(pidevice.qIDN().strip()))

        # In the module pipython.pitools there are some helper
        # functions to make using a PI device more convenient. The "startup"
        # function will initialize your system. There are controllers that
        # cannot discover the connected stages hence we set them with the
        # "stages" argument. The desired referencing method (see controller
        # user manual) is passed as "refmode" argument. All connected axes
        # will be stopped if they are moving and their servo will be enabled.

        # set volatile reference mode to analog input
        print("Setting up analog input")
        pidevice.SPA('Z', 0x06000500, 2)
        self.pidevice = pidevice

        # Connect to the piezo.
        self.good = 1

        # get min and max range
        #self.rangemin = pidevice.qTMN()
        #self.rangemax = pidevice.qTMX()
        #self.curpos = pidevice.qPOS()

        # NI relevant bits
        #print(f"Initializing NI board {board}, {line}")
        #self.board = board
        #self.line = line
        #self.trigger_source = trigger_source
        #self.scale = scale
        #self.ni_task = nicontrol.AnalogOutput(self.board, self.line)
        #self.ni_task.StartTask()

    def zPosition(self):
        """
        Query current z position in microns.
        """
        if self.good:
            z0 = self.pidevice.qPOS()['Z']
            return {'z': z0}

    # NOTE(review): this definition is shadowed by the second shutDown()
    # further below, so it never takes effect.  It also uses self.ni_task,
    # which is only created in the commented-out NI code in __init__.
    def shutDown(self):
        self.ni_task.stopTask()
        self.ni_task.clearTask()

    def zSetVelocity(self, z_vel):
        # Velocity control is not implemented for this stage; no-op.
        pass

    def getStatus(self):
        # 1 once the controller connection succeeded in __init__.
        return self.good

    # NOTE(review): self.scale, self.ni_task, self.board and self.line are
    # only assigned in the commented-out NI code in __init__, so this method
    # would raise AttributeError at runtime -- looks like leftovers from an
    # earlier NI-analog-output implementation; confirm before use.
    def zMoveTo(self, z):
        try:
            #nicontrol.setAnalogLine(self.board, self.line, z * self.scale)
            voltage = z * self.scale
            if (voltage > 0.0) and (voltage < 10.0):
                self.ni_task.output(voltage)
        except AssertionError as e:
            print("Caught outputVoltage error:", type(e), str(e))
            self.ni_task.stopTask()
            self.ni_task.clearTask()
            self.ni_task = nicontrol.AnalogOutput(self.board, self.line)
            self.ni_task.startTask()

    # NOTE(review): references the bare name `pidevice` (a local in
    # __init__), not self.pidevice -- this would raise NameError when
    # called; presumably self.pidevice was intended.
    def shutDown(self):
        if self.good:
            pidevice.StopAll(noraise=True)
            pitools.waitonready(pidevice)  # there are controllers that need some time to halt all axes
            pidevice.CloseConnection()
#
# Testing
#
if __name__ == "__main__":
    # NOTE(review): the constructor signature is (module_params, qt_settings,
    # **kwds); these positional arguments look like stale board/line values
    # from an earlier NI-based implementation -- confirm before relying on
    # this smoke test.
    stage = piPD72Z2x("USB-6002", 0)
    stage.zMoveTo(125.0)
| 32.490909 | 103 | 0.626749 |
acf09dc8c09ca7cfddc999f20355379ffc4f2d8a | 22,704 | py | Python | .eggs/boto3-1.7.9-py2.7.egg/boto3/s3/inject.py | MQQ/git-bigstore | 95f1e37fcda7fdce80502593cec31a44c604cf8a | [
"Apache-2.0"
] | null | null | null | .eggs/boto3-1.7.9-py2.7.egg/boto3/s3/inject.py | MQQ/git-bigstore | 95f1e37fcda7fdce80502593cec31a44c604cf8a | [
"Apache-2.0"
] | null | null | null | .eggs/boto3-1.7.9-py2.7.egg/boto3/s3/inject.py | MQQ/git-bigstore | 95f1e37fcda7fdce80502593cec31a44c604cf8a | [
"Apache-2.0"
] | null | null | null | # Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
from botocore.exceptions import ClientError
from boto3.s3.transfer import create_transfer_manager
from boto3.s3.transfer import TransferConfig, S3Transfer
from boto3.s3.transfer import ProgressCallbackInvoker
from boto3 import utils
def inject_s3_transfer_methods(class_attributes, **kwargs):
    """Attach the managed-transfer convenience methods to the S3 client."""
    transfer_methods = {
        'upload_file': upload_file,
        'download_file': download_file,
        'copy': copy,
        'upload_fileobj': upload_fileobj,
        'download_fileobj': download_fileobj,
    }
    for name, method in transfer_methods.items():
        utils.inject_attribute(class_attributes, name, method)
def inject_bucket_methods(class_attributes, **kwargs):
    """Attach load and managed-transfer convenience methods to Bucket."""
    bucket_methods = {
        'load': bucket_load,
        'upload_file': bucket_upload_file,
        'download_file': bucket_download_file,
        'copy': bucket_copy,
        'upload_fileobj': bucket_upload_fileobj,
        'download_fileobj': bucket_download_fileobj,
    }
    for name, method in bucket_methods.items():
        utils.inject_attribute(class_attributes, name, method)
def inject_object_methods(class_attributes, **kwargs):
    """Attach managed-transfer convenience methods to Object."""
    object_methods = {
        'upload_file': object_upload_file,
        'download_file': object_download_file,
        'copy': object_copy,
        'upload_fileobj': object_upload_fileobj,
        'download_fileobj': object_download_fileobj,
    }
    for name, method in object_methods.items():
        utils.inject_attribute(class_attributes, name, method)
def inject_object_summary_methods(class_attributes, **kwargs):
    # ObjectSummary only gains a custom load (backed by head_object below).
    utils.inject_attribute(class_attributes, 'load', object_summary_load)
def bucket_load(self, *args, **kwargs):
    """
    Calls s3.Client.list_buckets() to update the attributes of the Bucket
    resource.
    """
    # The docstring above is phrased this way to match what the autogenerated
    # docs produce.

    # We can't actually get the bucket's attributes from a HeadBucket,
    # so we need to use a ListBuckets and search for our bucket.
    # However, we may fail if we lack permissions to ListBuckets
    # or the bucket is in another account. In which case, creation_date
    # will be None.
    self.meta.data = {}
    try:
        response = self.meta.client.list_buckets()
        # Linear scan of the account's buckets for the one matching by name.
        for bucket_data in response['Buckets']:
            if bucket_data['Name'] == self.name:
                self.meta.data = bucket_data
                break
    except ClientError as e:
        # AccessDenied is swallowed deliberately (see comment above);
        # every other client error is re-raised.
        if not e.response.get('Error', {}).get('Code') == 'AccessDenied':
            raise
def object_summary_load(self, *args, **kwargs):
    """
    Calls s3.Client.head_object to update the attributes of the ObjectSummary
    resource.
    """
    head = self.meta.client.head_object(
        Bucket=self.bucket_name, Key=self.key)
    # ObjectSummary exposes 'Size' where head_object reports 'ContentLength'.
    try:
        head['Size'] = head.pop('ContentLength')
    except KeyError:
        pass
    self.meta.data = head
def upload_file(self, Filename, Bucket, Key, ExtraArgs=None,
                Callback=None, Config=None):
    """Upload a file to an S3 object.

    Usage::

        import boto3
        s3 = boto3.resource('s3')
        s3.meta.client.upload_file('/tmp/hello.txt', 'mybucket', 'hello.txt')

    Similar behavior as S3Transfer's upload_file() method,
    except that parameters are capitalized. Detailed examples can be found at
    :ref:`S3Transfer's Usage <ref_s3transfer_usage>`.
    """
    # S3Transfer is used as a context manager so its resources are released
    # when the transfer finishes (or fails).
    with S3Transfer(self, Config) as transfer:
        return transfer.upload_file(
            filename=Filename, bucket=Bucket, key=Key,
            extra_args=ExtraArgs, callback=Callback)
def download_file(self, Bucket, Key, Filename, ExtraArgs=None,
                  Callback=None, Config=None):
    """Download an S3 object to a file.

    Usage::

        import boto3
        s3 = boto3.resource('s3')
        s3.meta.client.download_file('mybucket', 'hello.txt', '/tmp/hello.txt')

    Similar behavior as S3Transfer's download_file() method,
    except that parameters are capitalized. Detailed examples can be found at
    :ref:`S3Transfer's Usage <ref_s3transfer_usage>`.
    """
    # S3Transfer is used as a context manager so its resources are released
    # when the transfer finishes (or fails).
    with S3Transfer(self, Config) as transfer:
        return transfer.download_file(
            bucket=Bucket, key=Key, filename=Filename,
            extra_args=ExtraArgs, callback=Callback)
def bucket_upload_file(self, Filename, Key,
                       ExtraArgs=None, Callback=None, Config=None):
    """Upload a file to an S3 object in this bucket.

    Usage::

        import boto3
        s3 = boto3.resource('s3')
        s3.Bucket('mybucket').upload_file('/tmp/hello.txt', 'hello.txt')

    Delegates to the client-level ``upload_file`` with this bucket's name;
    see :ref:`S3Transfer's Usage <ref_s3transfer_usage>` for examples.
    """
    client = self.meta.client
    return client.upload_file(
        Bucket=self.name,
        Key=Key,
        Filename=Filename,
        ExtraArgs=ExtraArgs,
        Callback=Callback,
        Config=Config,
    )
def bucket_download_file(self, Key, Filename,
                         ExtraArgs=None, Callback=None, Config=None):
    """Download an S3 object to a file.

    Usage::

        import boto3
        s3 = boto3.resource('s3')
        s3.Bucket('mybucket').download_file('hello.txt', '/tmp/hello.txt')

    Similar behavior as S3Transfer's download_file() method,
    except that parameters are capitalized. Detailed examples can be found at
    :ref:`S3Transfer's Usage <ref_s3transfer_usage>`.
    """
    # Thin delegation to the client-level download_file, filling in the
    # bucket name from this resource.
    return self.meta.client.download_file(
        Bucket=self.name, Key=Key, Filename=Filename,
        ExtraArgs=ExtraArgs, Callback=Callback, Config=Config)
def object_upload_file(self, Filename,
                       ExtraArgs=None, Callback=None, Config=None):
    """Upload a file to an S3 object.

    Usage::

        import boto3
        s3 = boto3.resource('s3')
        s3.Object('mybucket', 'hello.txt').upload_file('/tmp/hello.txt')

    Similar behavior as S3Transfer's upload_file() method,
    except that parameters are capitalized. Detailed examples can be found at
    :ref:`S3Transfer's Usage <ref_s3transfer_usage>`.
    """
    # Thin delegation to the client-level upload_file, filling in bucket
    # and key from this resource.
    return self.meta.client.upload_file(
        Filename=Filename, Bucket=self.bucket_name, Key=self.key,
        ExtraArgs=ExtraArgs, Callback=Callback, Config=Config)
def object_download_file(self, Filename,
                         ExtraArgs=None, Callback=None, Config=None):
    """Download an S3 object to a file.

    Usage::

        import boto3
        s3 = boto3.resource('s3')
        s3.Object('mybucket', 'hello.txt').download_file('/tmp/hello.txt')

    Similar behavior as S3Transfer's download_file() method,
    except that parameters are capitalized. Detailed examples can be found at
    :ref:`S3Transfer's Usage <ref_s3transfer_usage>`.
    """
    # Thin delegation to the client-level download_file, filling in bucket
    # and key from this resource.
    return self.meta.client.download_file(
        Bucket=self.bucket_name, Key=self.key, Filename=Filename,
        ExtraArgs=ExtraArgs, Callback=Callback, Config=Config)
def copy(self, CopySource, Bucket, Key, ExtraArgs=None, Callback=None,
         SourceClient=None, Config=None):
    """Copy an object from one S3 location to another.

    This is a managed transfer which will perform a multipart copy in
    multiple threads if necessary.

    Usage::

        import boto3
        s3 = boto3.resource('s3')
        copy_source = {
            'Bucket': 'mybucket',
            'Key': 'mykey'
        }
        s3.meta.client.copy(copy_source, 'otherbucket', 'otherkey')

    :type CopySource: dict
    :param CopySource: The name of the source bucket, key name of the
        source object, and optional version ID of the source object. The
        dictionary format is:
        ``{'Bucket': 'bucket', 'Key': 'key', 'VersionId': 'id'}``. Note
        that the ``VersionId`` key is optional and may be omitted.

    :type Bucket: str
    :param Bucket: The name of the bucket to copy to

    :type Key: str
    :param Key: The name of the key to copy to

    :type ExtraArgs: dict
    :param ExtraArgs: Extra arguments that may be passed to the
        client operation

    :type Callback: method
    :param Callback: A method which takes a number of bytes transferred to
        be periodically called during the copy.

    :type SourceClient: botocore or boto3 Client
    :param SourceClient: The client to be used for operation that
        may happen at the source object. For example, this client is
        used for the head_object that determines the size of the copy.
        If no client is provided, the current client is used as the client
        for the source object.

    :type Config: boto3.s3.transfer.TransferConfig
    :param Config: The transfer configuration to be used when performing the
        copy.
    """
    # Wrap the user callback (if any) so the transfer manager can report
    # progress through its subscriber interface.
    subscribers = None
    if Callback is not None:
        subscribers = [ProgressCallbackInvoker(Callback)]

    # Fall back to default transfer settings when no Config is supplied.
    config = Config
    if config is None:
        config = TransferConfig()

    # The manager is a context manager; the result is awaited before the
    # manager is torn down so the copy completes inside the with-block.
    with create_transfer_manager(self, config) as manager:
        future = manager.copy(
            copy_source=CopySource, bucket=Bucket, key=Key,
            extra_args=ExtraArgs, subscribers=subscribers,
            source_client=SourceClient)
        return future.result()
def bucket_copy(self, CopySource, Key, ExtraArgs=None, Callback=None,
                SourceClient=None, Config=None):
    """Copy an object from another S3 location into this bucket.

    Delegates to the client-level managed ``copy`` transfer, supplying this
    bucket's name as the destination bucket; a multipart copy in multiple
    threads is performed when necessary.

    Usage::

        import boto3
        s3 = boto3.resource('s3')
        copy_source = {'Bucket': 'mybucket', 'Key': 'mykey'}
        bucket = s3.Bucket('otherbucket')
        bucket.copy(copy_source, 'otherkey')

    :type CopySource: dict
    :param CopySource: Source bucket name, source key name, and an optional
        ``VersionId`` of the source object, in the form
        ``{'Bucket': 'bucket', 'Key': 'key', 'VersionId': 'id'}``.

    :type Key: str
    :param Key: Destination key name inside this bucket.

    :type ExtraArgs: dict
    :param ExtraArgs: Extra arguments that may be passed to the client
        operation.

    :type Callback: method
    :param Callback: Invoked periodically with the number of bytes
        transferred so far.

    :type SourceClient: botocore or boto3 Client
    :param SourceClient: Client used for operations on the source object;
        defaults to the current client.

    :type Config: boto3.s3.transfer.TransferConfig
    :param Config: The transfer configuration to be used when performing
        the copy.
    """
    client = self.meta.client
    return client.copy(CopySource=CopySource, Bucket=self.name, Key=Key,
                       ExtraArgs=ExtraArgs, Callback=Callback,
                       SourceClient=SourceClient, Config=Config)
def object_copy(self, CopySource, ExtraArgs=None, Callback=None,
                SourceClient=None, Config=None):
    """Copy an object from another S3 location into this object.

    Delegates to the client-level managed ``copy`` transfer, supplying this
    object's bucket name and key as the destination; a multipart copy in
    multiple threads is performed when necessary.

    Usage::

        import boto3
        s3 = boto3.resource('s3')
        copy_source = {'Bucket': 'mybucket', 'Key': 'mykey'}
        bucket = s3.Bucket('otherbucket')
        obj = bucket.Object('otherkey')
        obj.copy(copy_source)

    :type CopySource: dict
    :param CopySource: Source bucket name, source key name, and an optional
        ``VersionId`` of the source object, in the form
        ``{'Bucket': 'bucket', 'Key': 'key', 'VersionId': 'id'}``.

    :type ExtraArgs: dict
    :param ExtraArgs: Extra arguments that may be passed to the client
        operation.

    :type Callback: method
    :param Callback: Invoked periodically with the number of bytes
        transferred so far.

    :type SourceClient: botocore or boto3 Client
    :param SourceClient: Client used for operations on the source object;
        defaults to the current client.

    :type Config: boto3.s3.transfer.TransferConfig
    :param Config: The transfer configuration to be used when performing
        the copy.
    """
    client = self.meta.client
    return client.copy(CopySource=CopySource, Bucket=self.bucket_name,
                       Key=self.key, ExtraArgs=ExtraArgs, Callback=Callback,
                       SourceClient=SourceClient, Config=Config)
def upload_fileobj(self, Fileobj, Bucket, Key, ExtraArgs=None,
                   Callback=None, Config=None):
    """Upload a file-like object to S3.

    The file-like object must be opened in binary mode.  This is a managed
    transfer: a transfer manager is created for the duration of the call
    and will perform a multipart upload in multiple threads when necessary.

    Usage::

        import boto3
        s3 = boto3.client('s3')

        with open('filename', 'rb') as data:
            s3.upload_fileobj(data, 'mybucket', 'mykey')

    :type Fileobj: a file-like object
    :param Fileobj: A file-like object to upload.  At a minimum it must
        implement the `read` method and must return bytes.

    :type Bucket: str
    :param Bucket: The name of the bucket to upload to.

    :type Key: str
    :param Key: The name of the key to upload to.

    :type ExtraArgs: dict
    :param ExtraArgs: Extra arguments that may be passed to the client
        operation.

    :type Callback: method
    :param Callback: Invoked periodically with the number of bytes
        transferred so far.

    :type Config: boto3.s3.transfer.TransferConfig
    :param Config: The transfer configuration to be used when performing
        the upload.

    :raises ValueError: If ``Fileobj`` does not expose a ``read`` method.
    """
    # Fail fast on an unusable source before creating any transfer state.
    if not hasattr(Fileobj, 'read'):
        raise ValueError('Fileobj must implement read')
    progress_subscribers = None
    if Callback is not None:
        progress_subscribers = [ProgressCallbackInvoker(Callback)]
    transfer_config = Config if Config is not None else TransferConfig()
    with create_transfer_manager(self, transfer_config) as transfer_manager:
        upload_future = transfer_manager.upload(
            fileobj=Fileobj,
            bucket=Bucket,
            key=Key,
            extra_args=ExtraArgs,
            subscribers=progress_subscribers,
        )
        return upload_future.result()
def bucket_upload_fileobj(self, Fileobj, Key, ExtraArgs=None,
                          Callback=None, Config=None):
    """Upload a file-like object to this bucket.

    The file-like object must be opened in binary mode.  Delegates to the
    client-level managed ``upload_fileobj``, supplying this bucket's name;
    a multipart upload in multiple threads is performed when necessary.

    Usage::

        import boto3
        s3 = boto3.resource('s3')
        bucket = s3.Bucket('mybucket')

        with open('filename', 'rb') as data:
            bucket.upload_fileobj(data, 'mykey')

    :type Fileobj: a file-like object
    :param Fileobj: A file-like object to upload.  At a minimum it must
        implement the `read` method and must return bytes.

    :type Key: str
    :param Key: The name of the key to upload to.

    :type ExtraArgs: dict
    :param ExtraArgs: Extra arguments that may be passed to the client
        operation.

    :type Callback: method
    :param Callback: Invoked periodically with the number of bytes
        transferred so far.

    :type Config: boto3.s3.transfer.TransferConfig
    :param Config: The transfer configuration to be used when performing
        the upload.
    """
    client = self.meta.client
    return client.upload_fileobj(Fileobj=Fileobj, Bucket=self.name,
                                 Key=Key, ExtraArgs=ExtraArgs,
                                 Callback=Callback, Config=Config)
def object_upload_fileobj(self, Fileobj, ExtraArgs=None, Callback=None,
                          Config=None):
    """Upload a file-like object to this object's location.

    The file-like object must be opened in binary mode.  Delegates to the
    client-level managed ``upload_fileobj``, supplying this object's bucket
    name and key; a multipart upload in multiple threads is performed when
    necessary.

    Usage::

        import boto3
        s3 = boto3.resource('s3')
        bucket = s3.Bucket('mybucket')
        obj = bucket.Object('mykey')

        with open('filename', 'rb') as data:
            obj.upload_fileobj(data)

    :type Fileobj: a file-like object
    :param Fileobj: A file-like object to upload.  At a minimum it must
        implement the `read` method and must return bytes.

    :type ExtraArgs: dict
    :param ExtraArgs: Extra arguments that may be passed to the client
        operation.

    :type Callback: method
    :param Callback: Invoked periodically with the number of bytes
        transferred so far.

    :type Config: boto3.s3.transfer.TransferConfig
    :param Config: The transfer configuration to be used when performing
        the upload.
    """
    client = self.meta.client
    return client.upload_fileobj(Fileobj=Fileobj, Bucket=self.bucket_name,
                                 Key=self.key, ExtraArgs=ExtraArgs,
                                 Callback=Callback, Config=Config)
def download_fileobj(self, Bucket, Key, Fileobj, ExtraArgs=None,
                     Callback=None, Config=None):
    """Download an object from S3 into a file-like object.

    The file-like object must be opened in binary mode.  This is a managed
    transfer: a transfer manager is created for the duration of the call
    and will perform a multipart download in multiple threads when
    necessary.

    Usage::

        import boto3
        s3 = boto3.client('s3')

        with open('filename', 'wb') as data:
            s3.download_fileobj('mybucket', 'mykey', data)

    :type Bucket: str
    :param Bucket: The name of the bucket to download from.

    :type Key: str
    :param Key: The name of the key to download from.

    :type Fileobj: a file-like object
    :param Fileobj: A file-like object to download into.  At a minimum it
        must implement the `write` method and must accept bytes.

    :type ExtraArgs: dict
    :param ExtraArgs: Extra arguments that may be passed to the client
        operation.

    :type Callback: method
    :param Callback: Invoked periodically with the number of bytes
        transferred so far.

    :type Config: boto3.s3.transfer.TransferConfig
    :param Config: The transfer configuration to be used when performing
        the download.

    :raises ValueError: If ``Fileobj`` does not expose a ``write`` method.
    """
    # Fail fast on an unusable sink before creating any transfer state.
    if not hasattr(Fileobj, 'write'):
        raise ValueError('Fileobj must implement write')
    progress_subscribers = None
    if Callback is not None:
        progress_subscribers = [ProgressCallbackInvoker(Callback)]
    transfer_config = Config if Config is not None else TransferConfig()
    with create_transfer_manager(self, transfer_config) as transfer_manager:
        download_future = transfer_manager.download(
            bucket=Bucket,
            key=Key,
            fileobj=Fileobj,
            extra_args=ExtraArgs,
            subscribers=progress_subscribers,
        )
        return download_future.result()
def bucket_download_fileobj(self, Key, Fileobj, ExtraArgs=None,
                            Callback=None, Config=None):
    """Download an object from this bucket into a file-like object.

    The file-like object must be opened in binary mode.  Delegates to the
    client-level managed ``download_fileobj``, supplying this bucket's
    name; a multipart download in multiple threads is performed when
    necessary.

    Usage::

        import boto3
        s3 = boto3.resource('s3')
        bucket = s3.Bucket('mybucket')

        with open('filename', 'wb') as data:
            bucket.download_fileobj('mykey', data)

    :type Key: str
    :param Key: The name of the key to download from.

    :type Fileobj: a file-like object
    :param Fileobj: A file-like object to download into.  At a minimum it
        must implement the `write` method and must accept bytes.

    :type ExtraArgs: dict
    :param ExtraArgs: Extra arguments that may be passed to the client
        operation.

    :type Callback: method
    :param Callback: Invoked periodically with the number of bytes
        transferred so far.

    :type Config: boto3.s3.transfer.TransferConfig
    :param Config: The transfer configuration to be used when performing
        the download.
    """
    client = self.meta.client
    return client.download_fileobj(Bucket=self.name, Key=Key,
                                   Fileobj=Fileobj, ExtraArgs=ExtraArgs,
                                   Callback=Callback, Config=Config)
def object_download_fileobj(self, Fileobj, ExtraArgs=None, Callback=None,
                            Config=None):
    """Download this object from S3 into a file-like object.

    The file-like object must be opened in binary mode.  Delegates to the
    client-level managed ``download_fileobj``, supplying this object's
    bucket name and key; a multipart download in multiple threads is
    performed when necessary.

    Usage::

        import boto3
        s3 = boto3.resource('s3')
        bucket = s3.Bucket('mybucket')
        obj = bucket.Object('mykey')

        with open('filename', 'wb') as data:
            obj.download_fileobj(data)

    :type Fileobj: a file-like object
    :param Fileobj: A file-like object to download into.  At a minimum it
        must implement the `write` method and must accept bytes.

    :type ExtraArgs: dict
    :param ExtraArgs: Extra arguments that may be passed to the client
        operation.

    :type Callback: method
    :param Callback: Invoked periodically with the number of bytes
        transferred so far.

    :type Config: boto3.s3.transfer.TransferConfig
    :param Config: The transfer configuration to be used when performing
        the download.
    """
    client = self.meta.client
    return client.download_fileobj(Bucket=self.bucket_name, Key=self.key,
                                   Fileobj=Fileobj, ExtraArgs=ExtraArgs,
                                   Callback=Callback, Config=Config)
| 34.76876 | 79 | 0.676313 |
acf09e91be644ce717553657d62d44e37f5827b5 | 2,097 | py | Python | examples/rabi_demo/exp_graph.py | riverlane/deltalanguage | 41c3cfa88ed3f17956645c18566c2147a4bdd74c | [
"MIT"
] | 16 | 2021-01-06T17:44:51.000Z | 2022-01-06T12:07:07.000Z | examples/rabi_demo/exp_graph.py | riverlane/deltalanguage | 41c3cfa88ed3f17956645c18566c2147a4bdd74c | [
"MIT"
] | null | null | null | examples/rabi_demo/exp_graph.py | riverlane/deltalanguage | 41c3cfa88ed3f17956645c18566c2147a4bdd74c | [
"MIT"
] | 4 | 2021-03-25T20:35:08.000Z | 2021-09-06T13:10:58.000Z | import deltalanguage as dl
from examples.rabi_demo.aggregator import Aggregator, REPETITIONS, RESOLUTION
from examples.rabi_demo.commander import Commander
from examples.rabi_demo.helper_functions import generate_angles
@dl.DeltaBlock()
def experiment_stopper(completed: dl.Int(dl.Size(8))) -> dl.Void:
if completed:
if completed == 1:
raise dl.DeltaRuntimeExit
else:
print(f"The experiment returned error code: {completed}")
raise RuntimeError("Experiment returned an error", completed)
def get_graph():
"""Return the experiments graph `DeltaGraph` and data store instances.
Note that the aggregator and commanger files can be provided with
`vcd_name` which will lead to saving VCD of all signals for further
debugging.
"""
result_storage = dl.lib.StateSaver(int)
cmds_storage = dl.lib.StateSaver(dl.UInt(dl.Size(32)))
hal_template = dl.lib.hal_template
with dl.DeltaGraph() as graph:
ph_hal_result = dl.placeholder_node_factory()
ph_commander = dl.placeholder_node_factory()
# aggregator node of HAL results
result_aggregator = Aggregator(
name="result_aggregator",
vcd_name=None
).call(
hal_result=ph_hal_result,
shot_completed=ph_commander.shot_completed
)
# commander node to send HAL instructions
command_sender = Commander(
name="command_sender",
vcd_name=None
).call(
angle=result_aggregator.next_angle
)
hal_result = hal_template.call(hal_command=command_sender.hal_command)
# local store for experiment results
result_storage.save(result_aggregator.agg_result)
cmds_storage.save(command_sender.hal_command)
# tie up placeholders
ph_hal_result.specify_by_node(hal_result)
ph_commander.specify_by_node(command_sender)
# listen for flag to stop runtime
experiment_stopper(result_aggregator.completed)
return graph, result_storage, cmds_storage
| 32.765625 | 78 | 0.68908 |
acf09f21955bf0886af3eaabfd5c8ae72fe4b771 | 1,053 | py | Python | setup.py | bernt-matthias/argparse2tool | cca59571ad18f0bcc7b863ebbdd42a3480801aaf | [
"Apache-2.0"
] | null | null | null | setup.py | bernt-matthias/argparse2tool | cca59571ad18f0bcc7b863ebbdd42a3480801aaf | [
"Apache-2.0"
] | null | null | null | setup.py | bernt-matthias/argparse2tool | cca59571ad18f0bcc7b863ebbdd42a3480801aaf | [
"Apache-2.0"
] | null | null | null | from setuptools import setup
from argparse2tool import __version__
setup(name="argparse2tool",
version=__version__,
description='Instrument for forming Galaxy XML and CWL tool descriptions from argparse arguments',
author='Helena Rasche, Anton Khodak',
author_email='hxr@hx42.org',
install_requires=['galaxyxml>=0.2.3', 'jinja2', 'future'],
url='https://github.com/erasche/argparse2tool',
packages=["argparse", "argparse2tool", "click", "argparse2tool.cmdline2gxml", "argparse2tool.cmdline2cwl"],
entry_points={
'console_scripts': [
'argparse2tool_check_path = argparse2tool.check_path:main',
]
},
classifiers=[
'Development Status :: 4 - Beta',
'Operating System :: OS Independent',
'Intended Audience :: Developers',
'Environment :: Console',
'License :: OSI Approved :: Apache Software License',
],
include_package_data=True,
)
| 39 | 115 | 0.606838 |
acf0a013677e12e5b3a42e37423c4433372b557b | 4,666 | py | Python | SprityBird/spritybird/python3.5/lib/python3.5/site-packages/openpyxl/chartsheet/tests/test_custom.py | MobileAnalytics/iPython-Framework | da0e598308c067cd5c5290a6364b3ffaf2d2418f | [
"MIT"
] | 4 | 2018-07-04T17:20:12.000Z | 2019-07-14T18:07:25.000Z | SprityBird/spritybird/python3.5/lib/python3.5/site-packages/openpyxl/chartsheet/tests/test_custom.py | MobileAnalytics/iPython-Framework | da0e598308c067cd5c5290a6364b3ffaf2d2418f | [
"MIT"
] | null | null | null | SprityBird/spritybird/python3.5/lib/python3.5/site-packages/openpyxl/chartsheet/tests/test_custom.py | MobileAnalytics/iPython-Framework | da0e598308c067cd5c5290a6364b3ffaf2d2418f | [
"MIT"
] | 1 | 2018-09-03T03:02:06.000Z | 2018-09-03T03:02:06.000Z | from __future__ import absolute_import
# Copyright (c) 2010-2016 openpyxl
import pytest
from openpyxl.worksheet import PageMargins
from openpyxl.xml.functions import fromstring, tostring
from openpyxl.tests.helper import compare_xml
@pytest.fixture
def CustomChartsheetView():
    """Return the CustomChartsheetView class under test (imported lazily)."""
    from ..custom import CustomChartsheetView
    return CustomChartsheetView
class TestCustomChartsheetView:
    """Round-trip serialisation tests for CustomChartsheetView."""

    def test_read(self, CustomChartsheetView):
        # Deserialise a sample <customSheetView> element and spot-check
        # a few attributes.
        src = """
        <customSheetView guid="{C43F44F8-8CE9-4A07-A9A9-0646C7C6B826}" scale="88" zoomToFit="1">
          <pageMargins left="0.23622047244094491" right="0.23622047244094491" top="0.74803149606299213" bottom="0.74803149606299213" header="0.31496062992125984" footer="0.31496062992125984" />
          <pageSetup paperSize="7" orientation="landscape" r:id="rId1" xmlns:r="http://schemas.openxmlformats.org/officeDocument/2006/relationships" />
          <headerFooter/>
        </customSheetView>
        """
        xml = fromstring(src)
        customChartsheetView = CustomChartsheetView.from_tree(xml)
        # 'state' is absent from the XML, so it is expected to fall back
        # to 'visible'.
        assert customChartsheetView.state == 'visible'
        assert customChartsheetView.scale == 88
        assert customChartsheetView.pageMargins.left == 0.23622047244094491

    def test_write(self, CustomChartsheetView):
        # Serialise an object built in code and compare against the
        # expected XML (pageSetup/headerFooter are not set here).
        pageMargins = PageMargins(left=0.2362204724409449, right=0.2362204724409449, top=0.7480314960629921,
                                  bottom=0.7480314960629921, header=0.3149606299212598, footer=0.3149606299212598)
        customChartsheetView = CustomChartsheetView(guid="{C43F44F8-8CE9-4A07-A9A9-0646C7C6B826}", scale=88,
                                                    zoomToFit=1,
                                                    pageMargins=pageMargins)
        expected = """
        <customSheetView guid="{C43F44F8-8CE9-4A07-A9A9-0646C7C6B826}" scale="88" state="visible" zoomToFit="1">
          <pageMargins left="0.2362204724409449" right="0.2362204724409449" top="0.7480314960629921" bottom="0.7480314960629921" header="0.3149606299212598" footer="0.3149606299212598" />
        </customSheetView>
        """
        xml = tostring(customChartsheetView.to_tree())
        diff = compare_xml(xml, expected)
        assert diff is None, diff
@pytest.fixture
def CustomChartsheetViews():
    """Return the CustomChartsheetViews class under test (imported lazily)."""
    from ..custom import CustomChartsheetViews
    return CustomChartsheetViews
class TestCustomChartsheetViews:
    """Round-trip serialisation tests for the <customSheetViews> container."""

    def test_read(self, CustomChartsheetViews):
        # A nested <customSheetView> should be deserialised into the
        # container's customSheetView list.
        src = """
        <customSheetViews>
          <customSheetView guid="{C43F44F8-8CE9-4A07-A9A9-0646C7C6B826}" scale="88" zoomToFit="1">
            <pageMargins left="0.23622047244094491" right="0.23622047244094491" top="0.74803149606299213" bottom="0.74803149606299213" header="0.31496062992125984" footer="0.31496062992125984" />
            <pageSetup paperSize="7" orientation="landscape" r:id="rId1" xmlns:r="http://schemas.openxmlformats.org/officeDocument/2006/relationships" />
            <headerFooter/>
          </customSheetView>
        </customSheetViews>
        """
        xml = fromstring(src)
        customChartsheetViews = CustomChartsheetViews.from_tree(xml)
        # 'state' is absent from the XML, so it is expected to fall back
        # to 'visible'.
        assert customChartsheetViews.customSheetView[0].state == 'visible'
        assert customChartsheetViews.customSheetView[0].scale == 88
        assert customChartsheetViews.customSheetView[0].pageMargins.left == 0.23622047244094491

    def test_write(self, CustomChartsheetViews):
        from ..custom import CustomChartsheetView
        # Wrap a single view in the container and compare the serialised
        # output against the expected XML.
        pageMargins = PageMargins(left=0.2362204724409449, right=0.2362204724409449, top=0.7480314960629921,
                                  bottom=0.7480314960629921, header=0.3149606299212598, footer=0.3149606299212598)
        customChartsheetView = CustomChartsheetView(guid="{C43F44F8-8CE9-4A07-A9A9-0646C7C6B826}", scale=88,
                                                    zoomToFit=1,
                                                    pageMargins=pageMargins)
        customChartsheetViews = CustomChartsheetViews(customSheetView=[customChartsheetView])
        expected = """
        <customSheetViews>
          <customSheetView guid="{C43F44F8-8CE9-4A07-A9A9-0646C7C6B826}" scale="88" state="visible" zoomToFit="1">
            <pageMargins left="0.2362204724409449" right="0.2362204724409449" top="0.7480314960629921" bottom="0.7480314960629921" header="0.3149606299212598" footer="0.3149606299212598" />
          </customSheetView>
        </customSheetViews>
        """
        xml = tostring(customChartsheetViews.to_tree())
        diff = compare_xml(xml, expected)
        assert diff is None, diff
acf0a03ee2ccd4d349edea540abe6e02eb765aa5 | 388 | py | Python | solutions/0216_Linked-List-Deletion/solution.py | Hsins/BinarySearch | 7c818951f6a53e40fc76ac5d194f7beadf3a46a7 | [
"MIT"
] | null | null | null | solutions/0216_Linked-List-Deletion/solution.py | Hsins/BinarySearch | 7c818951f6a53e40fc76ac5d194f7beadf3a46a7 | [
"MIT"
] | null | null | null | solutions/0216_Linked-List-Deletion/solution.py | Hsins/BinarySearch | 7c818951f6a53e40fc76ac5d194f7beadf3a46a7 | [
"MIT"
] | null | null | null | # class LLNode:
# def __init__(self, val, next=None):
# self.val = val
# self.next = next
class Solution:
    def solve(self, node, target):
        """Return the list's head after removing every node whose value
        equals ``target``.

        A dummy sentinel placed in front of the head lets the head node
        itself be removed with the same splice logic as any other node.
        """
        sentinel = LLNode(None, node)
        cursor = sentinel
        while cursor.next is not None:
            if cursor.next.val == target:
                # Splice out the match but do not advance: the new next
                # node may also match.
                cursor.next = cursor.next.next
            else:
                cursor = cursor.next
        return sentinel.next
acf0a0af505c6ddcb05bcec3645728ca0a9fd457 | 236,909 | py | Python | buildscripts/cpplint.py | OnTheWay2015/mongo_3.4_modify | 53e7f8550007965ce333c0f7301b849491655ee4 | [
"Apache-2.0"
] | 12 | 2020-04-27T21:31:57.000Z | 2020-12-13T13:25:06.000Z | buildscripts/cpplint.py | OnTheWay2015/mongo_3.4_modify | 53e7f8550007965ce333c0f7301b849491655ee4 | [
"Apache-2.0"
] | null | null | null | buildscripts/cpplint.py | OnTheWay2015/mongo_3.4_modify | 53e7f8550007965ce333c0f7301b849491655ee4 | [
"Apache-2.0"
] | 4 | 2021-03-27T14:40:25.000Z | 2022-03-19T20:52:41.000Z | #!/usr/bin/env python
#
# Copyright (c) 2009 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Does google-lint on c++ files.
The goal of this script is to identify places in the code that *may*
be in non-compliance with google style. It does not attempt to fix
up these problems -- the point is to educate. It also does not
attempt to find all problems, or to ensure that everything it does
find is legitimately a problem.
In particular, we can get very confused by /* and // inside strings!
We do a small hack, which is to ignore //'s with "'s after them on the
same line, but it is far from perfect (in either direction).
"""
import codecs
import copy
import getopt
import math # for log
import os
import re
import sre_compile
import string
import sys
import unicodedata
_USAGE = """
Syntax: cpplint.py [--verbose=#] [--output=vs7] [--filter=-x,+y,...]
[--counting=total|toplevel|detailed] [--root=subdir]
[--linelength=digits]
<file> [file] ...
The style guidelines this tries to follow are those in
http://google-styleguide.googlecode.com/svn/trunk/cppguide.xml
Every problem is given a confidence score from 1-5, with 5 meaning we are
certain of the problem, and 1 meaning it could be a legitimate construct.
This will miss some errors, and is not a substitute for a code review.
To suppress false-positive errors of a certain category, add a
'NOLINT(category)' comment to the line. NOLINT or NOLINT(*)
suppresses errors of all categories on that line.
The files passed in will be linted; at least one file must be provided.
Default linted extensions are .cc, .cpp, .cu, .cuh and .h. Change the
extensions with the --extensions flag.
Flags:
output=vs7
By default, the output is formatted to ease emacs parsing. Visual Studio
compatible output (vs7) may also be used. Other formats are unsupported.
verbose=#
Specify a number 0-5 to restrict errors to certain verbosity levels.
filter=-x,+y,...
Specify a comma-separated list of category-filters to apply: only
error messages whose category names pass the filters will be printed.
(Category names are printed with the message and look like
"[whitespace/indent]".) Filters are evaluated left to right.
"-FOO" and "FOO" means "do not print categories that start with FOO".
"+FOO" means "do print categories that start with FOO".
Examples: --filter=-whitespace,+whitespace/braces
--filter=whitespace,runtime/printf,+runtime/printf_format
--filter=-,+build/include_what_you_use
To see a list of all the categories used in cpplint, pass no arg:
--filter=
counting=total|toplevel|detailed
The total number of errors found is always printed. If
'toplevel' is provided, then the count of errors in each of
the top-level categories like 'build' and 'whitespace' will
also be printed. If 'detailed' is provided, then a count
is provided for each category like 'build/class'.
root=subdir
The root directory used for deriving header guard CPP variable.
By default, the header guard CPP variable is calculated as the relative
path to the directory that contains .git, .hg, or .svn. When this flag
is specified, the relative path is calculated from the specified
directory. If the specified directory does not exist, this flag is
ignored.
Examples:
Assuming that src/.git exists, the header guard CPP variables for
src/chrome/browser/ui/browser.h are:
No flag => CHROME_BROWSER_UI_BROWSER_H_
--root=chrome => BROWSER_UI_BROWSER_H_
--root=chrome/browser => UI_BROWSER_H_
linelength=digits
This is the allowed line length for the project. The default value is
80 characters.
Examples:
--linelength=120
extensions=extension,extension,...
The allowed file extensions that cpplint will check
Examples:
--extensions=hpp,cpp
cpplint.py supports per-directory configurations specified in CPPLINT.cfg
files. CPPLINT.cfg file can contain a number of key=value pairs.
Currently the following options are supported:
set noparent
filter=+filter1,-filter2,...
exclude_files=regex
linelength=80
"set noparent" option prevents cpplint from traversing directory tree
upwards looking for more .cfg files in parent directories. This option
is usually placed in the top-level project directory.
The "filter" option is similar in function to --filter flag. It specifies
message filters in addition to the |_DEFAULT_FILTERS| and those specified
through --filter command-line flag.
"exclude_files" allows to specify a regular expression to be matched against
a file name. If the expression matches, the file is skipped and not run
through liner.
"linelength" allows to specify the allowed line length for the project.
CPPLINT.cfg has an effect on files in the same directory and all
sub-directories, unless overridden by a nested configuration file.
Example file:
filter=-build/include_order,+build/include_alpha
exclude_files=.*\.cc
The above example disables build/include_order warning and enables
build/include_alpha as well as excludes all .cc from being
processed by linter, in the current directory (where the .cfg
file is located) and all sub-directories.
"""
# We categorize each error message we print. Here are the categories.
# We want an explicit list so we can list them all in cpplint --filter=.
# If you add a new error message with a new category, add it to the list
# here! cpplint_unittest.py should tell you if you forget to do this.
_ERROR_CATEGORIES = [
'build/class',
'build/c++11',
'build/deprecated',
'build/endif_comment',
'build/explicit_make_pair',
'build/forward_decl',
'build/header_guard',
'build/include',
'build/include_alpha',
'build/include_order',
'build/include_what_you_use',
'build/namespaces',
'build/printf_format',
'build/storage_class',
'legal/copyright',
'mongo/polyfill',
'readability/alt_tokens',
'readability/braces',
'readability/casting',
'readability/check',
'readability/constructors',
'readability/fn_size',
'readability/function',
'readability/inheritance',
'readability/multiline_comment',
'readability/multiline_string',
'readability/namespace',
'readability/nolint',
'readability/nul',
'readability/streams',
'readability/todo',
'readability/utf8',
'runtime/arrays',
'runtime/casting',
'runtime/explicit',
'runtime/int',
'runtime/init',
'runtime/invalid_increment',
'runtime/member_string_references',
'runtime/memset',
'runtime/indentation_namespace',
'runtime/operator',
'runtime/printf',
'runtime/printf_format',
'runtime/references',
'runtime/string',
'runtime/threadsafe_fn',
'runtime/vlog',
'whitespace/blank_line',
'whitespace/braces',
'whitespace/comma',
'whitespace/comments',
'whitespace/empty_conditional_body',
'whitespace/empty_loop_body',
'whitespace/end_of_line',
'whitespace/ending_newline',
'whitespace/forcolon',
'whitespace/indent',
'whitespace/line_length',
'whitespace/newline',
'whitespace/operators',
'whitespace/parens',
'whitespace/semicolon',
'whitespace/tab',
'whitespace/todo'
]
# The default state of the category filter. This is overridden by the --filter=
# flag. By default all errors are on, so only add here categories that should be
# off by default (i.e., categories that must be enabled by the --filter= flags).
# All entries here should start with a '-' or '+', as in the --filter= flag.
_DEFAULT_FILTERS = ['-build/include_alpha']
# We used to check for high-bit characters, but after much discussion we
# decided those were OK, as long as they were in UTF-8 and didn't represent
# hard-coded international strings, which belong in a separate i18n file.
# C++ headers
_CPP_HEADERS = frozenset([
# Legacy
'algobase.h',
'algo.h',
'alloc.h',
'builtinbuf.h',
'bvector.h',
'complex.h',
'defalloc.h',
'deque.h',
'editbuf.h',
'fstream.h',
'function.h',
'hash_map',
'hash_map.h',
'hash_set',
'hash_set.h',
'hashtable.h',
'heap.h',
'indstream.h',
'iomanip.h',
'iostream.h',
'istream.h',
'iterator.h',
'list.h',
'map.h',
'multimap.h',
'multiset.h',
'ostream.h',
'pair.h',
'parsestream.h',
'pfstream.h',
'procbuf.h',
'pthread_alloc',
'pthread_alloc.h',
'rope',
'rope.h',
'ropeimpl.h',
'set.h',
'slist',
'slist.h',
'stack.h',
'stdiostream.h',
'stl_alloc.h',
'stl_relops.h',
'streambuf.h',
'stream.h',
'strfile.h',
'strstream.h',
'tempbuf.h',
'tree.h',
'type_traits.h',
'vector.h',
# 17.6.1.2 C++ library headers
'algorithm',
'array',
'atomic',
'bitset',
'chrono',
'codecvt',
'complex',
'condition_variable',
'deque',
'exception',
'forward_list',
'fstream',
'functional',
'future',
'initializer_list',
'iomanip',
'ios',
'iosfwd',
'iostream',
'istream',
'iterator',
'limits',
'list',
'locale',
'map',
'memory',
'mutex',
'new',
'numeric',
'ostream',
'queue',
'random',
'ratio',
'regex',
'set',
'sstream',
'stack',
'stdexcept',
'streambuf',
'string',
'strstream',
'system_error',
'thread',
'tuple',
'typeindex',
'typeinfo',
'type_traits',
'unordered_map',
'unordered_set',
'utility',
'valarray',
'vector',
# 17.6.1.2 C++ headers for C library facilities
'cassert',
'ccomplex',
'cctype',
'cerrno',
'cfenv',
'cfloat',
'cinttypes',
'ciso646',
'climits',
'clocale',
'cmath',
'csetjmp',
'csignal',
'cstdalign',
'cstdarg',
'cstdbool',
'cstddef',
'cstdint',
'cstdio',
'cstdlib',
'cstring',
'ctgmath',
'ctime',
'cuchar',
'cwchar',
'cwctype',
])
# These headers are excluded from [build/include] and [build/include_order]
# checks:
# - Anything not following google file name conventions (containing an
# uppercase character, such as Python.h or nsStringAPI.h, for example).
# - Lua headers.
_THIRD_PARTY_HEADERS_PATTERN = re.compile(
r'^(?:[^/]*[A-Z][^/]*\.h|lua\.h|lauxlib\.h|lualib\.h)$')
# Assertion macros. These are defined in base/logging.h and
# testing/base/gunit.h. Note that the _M versions need to come first
# for substring matching to work.
_CHECK_MACROS = [
'DCHECK', 'CHECK',
'EXPECT_TRUE_M', 'EXPECT_TRUE',
'ASSERT_TRUE_M', 'ASSERT_TRUE',
'EXPECT_FALSE_M', 'EXPECT_FALSE',
'ASSERT_FALSE_M', 'ASSERT_FALSE',
]
# Replacement macros for CHECK/DCHECK/EXPECT_TRUE/EXPECT_FALSE
# Map each CHECK-style macro to {comparison operator: replacement macro},
# e.g. _CHECK_REPLACEMENT['CHECK']['=='] == 'CHECK_EQ'.
_CHECK_REPLACEMENT = dict([(m, {}) for m in _CHECK_MACROS])

# For the "positive" assertions the operator maps to its macro directly.
for op, replacement in [('==', 'EQ'), ('!=', 'NE'),
                        ('>=', 'GE'), ('>', 'GT'),
                        ('<=', 'LE'), ('<', 'LT')]:
  _CHECK_REPLACEMENT['DCHECK'][op] = 'DCHECK_%s' % replacement
  _CHECK_REPLACEMENT['CHECK'][op] = 'CHECK_%s' % replacement
  _CHECK_REPLACEMENT['EXPECT_TRUE'][op] = 'EXPECT_%s' % replacement
  _CHECK_REPLACEMENT['ASSERT_TRUE'][op] = 'ASSERT_%s' % replacement
  _CHECK_REPLACEMENT['EXPECT_TRUE_M'][op] = 'EXPECT_%s_M' % replacement
  _CHECK_REPLACEMENT['ASSERT_TRUE_M'][op] = 'ASSERT_%s_M' % replacement

# For the negated assertions (EXPECT_FALSE/ASSERT_FALSE) the operator maps
# to the macro for its logical inverse, e.g. EXPECT_FALSE(a == b) suggests
# EXPECT_NE(a, b).
for op, inv_replacement in [('==', 'NE'), ('!=', 'EQ'),
                            ('>=', 'LT'), ('>', 'LE'),
                            ('<=', 'GT'), ('<', 'GE')]:
  _CHECK_REPLACEMENT['EXPECT_FALSE'][op] = 'EXPECT_%s' % inv_replacement
  _CHECK_REPLACEMENT['ASSERT_FALSE'][op] = 'ASSERT_%s' % inv_replacement
  _CHECK_REPLACEMENT['EXPECT_FALSE_M'][op] = 'EXPECT_%s_M' % inv_replacement
  _CHECK_REPLACEMENT['ASSERT_FALSE_M'][op] = 'ASSERT_%s_M' % inv_replacement
# Alternative tokens and their replacements. For full list, see section 2.5
# Alternative tokens [lex.digraph] in the C++ standard.
#
# Digraphs (such as '%:') are not included here since it's a mess to
# match those on a word boundary.
_ALT_TOKEN_REPLACEMENT = {
'and': '&&',
'bitor': '|',
'or': '||',
'xor': '^',
'compl': '~',
'bitand': '&',
'and_eq': '&=',
'or_eq': '|=',
'xor_eq': '^=',
'not': '!',
'not_eq': '!='
}
# Compile regular expression that matches all the above keywords. The "[ =()]"
# bit is meant to avoid matching these keywords outside of boolean expressions.
#
# False positives include C-style multi-line comments and multi-line strings
# but those have always been troublesome for cpplint.
_ALT_TOKEN_REPLACEMENT_PATTERN = re.compile(
r'[ =()](' + ('|'.join(_ALT_TOKEN_REPLACEMENT.keys())) + r')(?=[ (]|$)')
# These constants define types of headers for use with
# _IncludeState.CheckNextIncludeOrder().
_C_SYS_HEADER = 1
_CPP_SYS_HEADER = 2
_LIKELY_MY_HEADER = 3
_POSSIBLE_MY_HEADER = 4
_OTHER_HEADER = 5
# These constants define the current inline assembly state
_NO_ASM = 0 # Outside of inline assembly block
_INSIDE_ASM = 1 # Inside inline assembly block
_END_ASM = 2 # Last line of inline assembly block
_BLOCK_ASM = 3 # The whole block is an inline assembly block
# Match start of assembly blocks
_MATCH_ASM = re.compile(r'^\s*(?:asm|_asm|__asm|__asm__)'
r'(?:\s+(volatile|__volatile__))?'
r'\s*[{(]')
_regexp_compile_cache = {}
# {str, set(int)}: a map from error categories to sets of linenumbers
# on which those errors are expected and should be suppressed.
_error_suppressions = {}
# The root directory used for deriving header guard CPP variable.
# This is set by --root flag.
_root = None
# The allowed line length of files.
# This is set by --linelength flag.
_line_length = 80
# The allowed extensions for file names
# This is set by --extensions flag.
_valid_extensions = set(['cc', 'h', 'cpp', 'cu', 'cuh'])
def ParseNolintSuppressions(filename, raw_line, linenum, error):
  """Updates the global list of error-suppressions.

  Parses any NOLINT comments on the current line, updating the global
  error_suppressions store.  Reports an error if the NOLINT comment
  was malformed.

  Args:
    filename: str, the name of the input file.
    raw_line: str, the line of input text, with comments.
    linenum: int, the number of the current line.
    error: function, an error handler.
  """
  matched = Search(r'\bNOLINT(NEXTLINE)?\b(\([^)]+\))?', raw_line)
  if not matched:
    return
  # NOLINTNEXTLINE suppresses errors on the line after the comment.
  target_line = linenum + 1 if matched.group(1) else linenum
  category = matched.group(2)
  if category is None or category == '(*)':  # => "suppress all"
    _error_suppressions.setdefault(None, set()).add(target_line)
    return
  if category.startswith('(') and category.endswith(')'):
    name = category[1:-1]
    if name in _ERROR_CATEGORIES:
      _error_suppressions.setdefault(name, set()).add(target_line)
    else:
      error(filename, linenum, 'readability/nolint', 5,
            'Unknown NOLINT error category: %s' % name)
def ResetNolintSuppressions():
  """Resets the set of NOLINT suppressions to empty.

  Called between files so suppressions recorded for one file do not
  leak into the next.
  """
  _error_suppressions.clear()
def IsErrorSuppressedByNolint(category, linenum):
  """Returns true if the specified error category is suppressed on this line.

  Consults the global error_suppressions map populated by
  ParseNolintSuppressions/ResetNolintSuppressions.

  Args:
    category: str, the category of the error.
    linenum: int, the current line number.
  Returns:
    bool, True iff the error should be suppressed due to a NOLINT comment.
  """
  # A line is suppressed if it appears under this specific category, or
  # under the None key, which records blanket "NOLINT(*)" suppressions.
  suppressed_for_category = _error_suppressions.get(category, set())
  suppressed_everywhere = _error_suppressions.get(None, set())
  return linenum in suppressed_for_category or linenum in suppressed_everywhere
def Match(pattern, s):
  """Matches the string with the pattern, caching the compiled regexp."""
  # The regexp compilation caching is inlined in both Match and Search for
  # performance reasons; factoring it out into a separate function turns out
  # to be noticeably expensive.
  try:
    regexp = _regexp_compile_cache[pattern]
  except KeyError:
    regexp = _regexp_compile_cache[pattern] = sre_compile.compile(pattern)
  return regexp.match(s)
def ReplaceAll(pattern, rep, s):
  """Replaces instances of pattern in a string with a replacement.

  The compiled regex is kept in a cache shared by Match and Search.

  Args:
    pattern: regex pattern
    rep: replacement text
    s: search string

  Returns:
    string with replacements made (or original string if no replacements)
  """
  try:
    regexp = _regexp_compile_cache[pattern]
  except KeyError:
    regexp = _regexp_compile_cache[pattern] = sre_compile.compile(pattern)
  return regexp.sub(rep, s)
def Search(pattern, s):
  """Searches the string for the pattern, caching the compiled regexp."""
  try:
    regexp = _regexp_compile_cache[pattern]
  except KeyError:
    regexp = _regexp_compile_cache[pattern] = sre_compile.compile(pattern)
  return regexp.search(s)
class _IncludeState(object):
  """Tracks line numbers for includes, and the order in which includes appear.

  include_list contains list of lists of (header, line number) pairs.
  It's a lists of lists rather than just one flat list to make it
  easier to update across preprocessor boundaries.

  Call CheckNextIncludeOrder() once for each header in the file, passing
  in the type constants defined above. Calls in an illegal order will
  raise an _IncludeError with an appropriate error message.
  """
  # self._section will move monotonically through this set. If it ever
  # needs to move backwards, CheckNextIncludeOrder will raise an error.
  _INITIAL_SECTION = 0
  _MY_H_SECTION = 1
  _C_SECTION = 2
  _CPP_SECTION = 3
  _OTHER_H_SECTION = 4
  # Human-readable names for the header-type constants, used in error messages.
  _TYPE_NAMES = {
      _C_SYS_HEADER: 'C system header',
      _CPP_SYS_HEADER: 'C++ system header',
      _LIKELY_MY_HEADER: 'header this file implements',
      _POSSIBLE_MY_HEADER: 'header this file may implement',
      _OTHER_HEADER: 'other header',
      }
  # Human-readable names for the section constants, used in error messages.
  _SECTION_NAMES = {
      _INITIAL_SECTION: "... nothing. (This can't be an error.)",
      _MY_H_SECTION: 'a header this file implements',
      _C_SECTION: 'C system header',
      _CPP_SECTION: 'C++ system header',
      _OTHER_H_SECTION: 'other header',
      }
  def __init__(self):
    # include_list: list of lists of (header, linenum) pairs; a new inner
    # list is started at each preprocessor conditional (see ResetSection).
    self.include_list = [[]]
    self.ResetSection('')
  def FindHeader(self, header):
    """Check if a header has already been included.

    Args:
      header: header to check.
    Returns:
      Line number of previous occurrence, or -1 if the header has not
      been seen before.
    """
    # Linear scan over every section; include lists are short in practice.
    for section_list in self.include_list:
      for f in section_list:
        if f[0] == header:
          return f[1]
    return -1
  def ResetSection(self, directive):
    """Reset section checking for preprocessor directive.

    Args:
      directive: preprocessor directive (e.g. "if", "else").
    """
    # The name of the current section.
    self._section = self._INITIAL_SECTION
    # The path of last found header.
    self._last_header = ''
    # Update list of includes. Note that we never pop from the
    # include list.
    if directive in ('if', 'ifdef', 'ifndef'):
      self.include_list.append([])
    elif directive in ('else', 'elif'):
      self.include_list[-1] = []
  def SetLastHeader(self, header_path):
    # Remember the most recent header path for alphabetical-order checks.
    self._last_header = header_path
  def CanonicalizeAlphabeticalOrder(self, header_path):
    """Returns a path canonicalized for alphabetical comparison.

    - replaces "-" with "_" so they both cmp the same.
    - removes '-inl' since we don't require them to be after the main header.
    - lowercase everything, just in case.

    Args:
      header_path: Path to be canonicalized.
    Returns:
      Canonicalized path.
    """
    return header_path.replace('-inl.h', '.h').replace('-', '_').lower()
  def IsInAlphabeticalOrder(self, clean_lines, linenum, header_path):
    """Check if a header is in alphabetical order with the previous header.

    Args:
      clean_lines: A CleansedLines instance containing the file.
      linenum: The number of the line to check.
      header_path: Canonicalized header to be checked.
    Returns:
      Returns true if the header is in alphabetical order.
    """
    # If previous section is different from current section, _last_header will
    # be reset to empty string, so it's always less than current header.
    #
    # If previous line was a blank line, assume that the headers are
    # intentionally sorted the way they are.
    if (self._last_header > header_path and
        not Match(r'^\s*$', clean_lines.elided[linenum - 1])):
      return False
    return True
  def CheckNextIncludeOrder(self, header_type):
    """Returns a non-empty error message if the next header is out of order.

    This function also updates the internal state to be ready to check
    the next include.

    Args:
      header_type: One of the _XXX_HEADER constants defined above.
    Returns:
      The empty string if the header is in the right order, or an
      error message describing what's wrong.
    """
    error_message = ('Found %s after %s' %
                     (self._TYPE_NAMES[header_type],
                      self._SECTION_NAMES[self._section]))
    last_section = self._section
    if header_type == _C_SYS_HEADER:
      if self._section <= self._C_SECTION:
        self._section = self._C_SECTION
      else:
        # Sections only advance; a C header after a later section is an error.
        self._last_header = ''
        return error_message
    elif header_type == _CPP_SYS_HEADER:
      if self._section <= self._CPP_SECTION:
        self._section = self._CPP_SECTION
      else:
        self._last_header = ''
        return error_message
    elif header_type == _LIKELY_MY_HEADER:
      if self._section <= self._MY_H_SECTION:
        self._section = self._MY_H_SECTION
      else:
        self._section = self._OTHER_H_SECTION
    elif header_type == _POSSIBLE_MY_HEADER:
      if self._section <= self._MY_H_SECTION:
        self._section = self._MY_H_SECTION
      else:
        # This will always be the fallback because we're not sure
        # enough that the header is associated with this file.
        self._section = self._OTHER_H_SECTION
    else:
      assert header_type == _OTHER_HEADER
      self._section = self._OTHER_H_SECTION
    # Crossing a section boundary resets the alphabetical-order tracking.
    if last_section != self._section:
      self._last_header = ''
    return ''
class _CppLintState(object):
  """Maintains module-wide lint state: verbosity, output format, the active
  error-message filters, and per-category error counts."""

  def __init__(self):
    self.verbose_level = 1  # global setting.
    self.error_count = 0    # global count of reported errors
    # filters to apply when emitting error messages
    self.filters = _DEFAULT_FILTERS[:]
    # backup of filter list. Used to restore the state after each file.
    self._filters_backup = self.filters[:]
    self.counting = 'total'  # In what way are we counting errors?
    self.errors_by_category = {}  # string to int dict storing error counts
    # output format:
    # "emacs" - format that emacs can parse (default)
    # "vs7" - format that Microsoft Visual Studio 7 can parse
    self.output_format = 'emacs'

  def SetOutputFormat(self, output_format):
    """Sets the output format for errors."""
    self.output_format = output_format

  def SetVerboseLevel(self, level):
    """Sets the module's verbosity, and returns the previous setting."""
    last_verbose_level = self.verbose_level
    self.verbose_level = level
    return last_verbose_level

  def SetCountingStyle(self, counting_style):
    """Sets the module's counting options ('total', 'toplevel' or 'detailed')."""
    self.counting = counting_style

  def SetFilters(self, filters):
    """Sets the error-message filters.

    These filters are applied when deciding whether to emit a given
    error message.

    Args:
      filters: A string of comma-separated filters (eg "+whitespace/indent").
               Each filter should start with + or -; else we die.

    Raises:
      ValueError: The comma-separated filters did not all start with '+' or '-'.
                  E.g. "-,+whitespace,-whitespace/indent,whitespace/badfilter"
    """
    # Default filters always have less priority than the flag ones.
    self.filters = _DEFAULT_FILTERS[:]
    self.AddFilters(filters)

  def AddFilters(self, filters):
    """Adds more filters to the existing list of error-message filters."""
    for filt in filters.split(','):
      clean_filt = filt.strip()
      if clean_filt:
        self.filters.append(clean_filt)
    # Validate the whole list, including the pre-existing defaults.
    for filt in self.filters:
      if not (filt.startswith('+') or filt.startswith('-')):
        raise ValueError('Every filter in --filters must start with + or -'
                         ' (%s does not)' % filt)

  def BackupFilters(self):
    """Saves the current filter list to backup storage."""
    self._filters_backup = self.filters[:]

  def RestoreFilters(self):
    """Restores filters previously backed up."""
    self.filters = self._filters_backup[:]

  def ResetErrorCounts(self):
    """Sets the module's error statistic back to zero."""
    self.error_count = 0
    self.errors_by_category = {}

  def IncrementErrorCount(self, category):
    """Bumps the module's error statistic."""
    self.error_count += 1
    if self.counting in ('toplevel', 'detailed'):
      if self.counting != 'detailed':
        # 'toplevel' counting collapses "whitespace/indent" -> "whitespace".
        category = category.split('/')[0]
      if category not in self.errors_by_category:
        self.errors_by_category[category] = 0
      self.errors_by_category[category] += 1

  def PrintErrorCounts(self):
    """Print a summary of errors by category, and the total."""
    # Use items() rather than iteritems(): iteritems() does not exist under
    # Python 3 (this would raise AttributeError there), and items() behaves
    # identically for this iteration under Python 2.
    for category, count in self.errors_by_category.items():
      sys.stderr.write('Category \'%s\' errors found: %d\n' %
                       (category, count))
    sys.stderr.write('Total errors found: %d\n' % self.error_count)
# The single module-wide lint state instance.  The underscore-prefixed
# functions below are thin module-level wrappers around its methods.
_cpplint_state = _CppLintState()
def _OutputFormat():
  """Gets the module's output format."""
  return _cpplint_state.output_format
def _SetOutputFormat(output_format):
  """Sets the module's output format."""
  _cpplint_state.SetOutputFormat(output_format)
def _VerboseLevel():
  """Returns the module's verbosity setting."""
  return _cpplint_state.verbose_level
def _SetVerboseLevel(level):
  """Sets the module's verbosity, and returns the previous setting."""
  return _cpplint_state.SetVerboseLevel(level)
def _SetCountingStyle(level):
  """Sets the module's counting options."""
  _cpplint_state.SetCountingStyle(level)
def _Filters():
  """Returns the module's list of output filters, as a list."""
  return _cpplint_state.filters
def _SetFilters(filters):
  """Sets the module's error-message filters.

  These filters are applied when deciding whether to emit a given
  error message.

  Args:
    filters: A string of comma-separated filters (eg "whitespace/indent").
             Each filter should start with + or -; else we die.
  """
  _cpplint_state.SetFilters(filters)
def _AddFilters(filters):
  """Adds more filter overrides.

  Unlike _SetFilters, this function does not reset the current list of filters
  available.

  Args:
    filters: A string of comma-separated filters (eg "whitespace/indent").
             Each filter should start with + or -; else we die.
  """
  _cpplint_state.AddFilters(filters)
def _BackupFilters():
  """ Saves the current filter list to backup storage."""
  _cpplint_state.BackupFilters()
def _RestoreFilters():
  """ Restores filters previously backed up."""
  _cpplint_state.RestoreFilters()
class _FunctionState(object):
  """Tracks current function name and the number of lines in its body."""

  _NORMAL_TRIGGER = 250  # for --v=0, 500 for --v=1, etc.
  _TEST_TRIGGER = 400    # about 50% more than _NORMAL_TRIGGER.

  def __init__(self):
    self.in_a_function = False
    self.lines_in_function = 0
    self.current_function = ''

  def Begin(self, function_name):
    """Start analyzing function body.

    Args:
      function_name: The name of the function being tracked.
    """
    self.in_a_function = True
    self.lines_in_function = 0
    self.current_function = function_name

  def Count(self):
    """Count line in current function body."""
    if not self.in_a_function:
      return
    self.lines_in_function += 1

  def Check(self, error, filename, linenum):
    """Report if too many lines in function body.

    Args:
      error: The function to call with any errors found.
      filename: The name of the current file.
      linenum: The number of the line to check.
    """
    # Test functions get a higher threshold before the warning fires.
    is_test = Match(r'T(EST|est)', self.current_function)
    base_trigger = self._TEST_TRIGGER if is_test else self._NORMAL_TRIGGER
    trigger = base_trigger * 2**_VerboseLevel()
    if self.lines_in_function <= trigger:
      return
    # Map the overshoot ratio onto a confidence level, capped at 5:
    # 50 => 0, 100 => 1, 200 => 2, 400 => 3, 800 => 4, 1600 => 5, ...
    error_level = min(
        5, int(math.log(self.lines_in_function / base_trigger, 2)))
    error(filename, linenum, 'readability/fn_size', error_level,
          'Small and focused functions are preferred:'
          ' %s has %d non-comment lines'
          ' (error triggered by exceeding %d lines).' % (
              self.current_function, self.lines_in_function, trigger))

  def End(self):
    """Stop analyzing function body."""
    self.in_a_function = False
class _IncludeError(Exception):
  """Indicates a problem with the include order in a file.

  Raised by _IncludeState.CheckNextIncludeOrder callers when includes
  appear in an illegal order.
  """
  pass
class FileInfo(object):
  """Provides utility functions for filenames.

  FileInfo provides easy access to the components of a file's path
  relative to the project root.
  """
  def __init__(self, filename):
    # filename: path as given on the command line; may be relative.
    self._filename = filename
  def FullName(self):
    """Make Windows paths like Unix."""
    return os.path.abspath(self._filename).replace('\\', '/')
  def RepositoryName(self):
    """FullName after removing the local path to the repository.

    If we have a real absolute path name here we can try to do something smart:
    detecting the root of the checkout and truncating /path/to/checkout from
    the name so that we get header guards that don't include things like
    "C:\Documents and Settings\..." or "/home/username/..." in them and thus
    people on different computers who have checked the source out to different
    locations won't see bogus errors.
    """
    fullname = self.FullName()
    if os.path.exists(fullname):
      project_dir = os.path.dirname(fullname)
      if os.path.exists(os.path.join(project_dir, ".svn")):
        # If there's a .svn file in the current directory, we recursively look
        # up the directory tree for the top of the SVN checkout
        root_dir = project_dir
        one_up_dir = os.path.dirname(root_dir)
        while os.path.exists(os.path.join(one_up_dir, ".svn")):
          root_dir = os.path.dirname(root_dir)
          one_up_dir = os.path.dirname(one_up_dir)
        prefix = os.path.commonprefix([root_dir, project_dir])
        return fullname[len(prefix) + 1:]
      # Not SVN <= 1.6? Try to find a git, hg, or svn top level directory by
      # searching up from the current path.
      root_dir = os.path.dirname(fullname)
      # Stop when a VCS marker is found or the filesystem root is reached
      # (dirname of a root directory is itself).
      while (root_dir != os.path.dirname(root_dir) and
             not os.path.exists(os.path.join(root_dir, ".git")) and
             not os.path.exists(os.path.join(root_dir, ".hg")) and
             not os.path.exists(os.path.join(root_dir, ".svn"))):
        root_dir = os.path.dirname(root_dir)
      if (os.path.exists(os.path.join(root_dir, ".git")) or
          os.path.exists(os.path.join(root_dir, ".hg")) or
          os.path.exists(os.path.join(root_dir, ".svn"))):
        prefix = os.path.commonprefix([root_dir, project_dir])
        return fullname[len(prefix) + 1:]
    # Don't know what to do; header guard warnings may be wrong...
    return fullname
  def Split(self):
    """Splits the file into the directory, basename, and extension.

    For 'chrome/browser/browser.cc', Split() would
    return ('chrome/browser', 'browser', '.cc')

    Returns:
      A tuple of (directory, basename, extension).
    """
    googlename = self.RepositoryName()
    project, rest = os.path.split(googlename)
    return (project,) + os.path.splitext(rest)
  def BaseName(self):
    """File base name - text after the final slash, before the final period."""
    return self.Split()[1]
  def Extension(self):
    """File extension - text following the final period."""
    return self.Split()[2]
  def NoExtension(self):
    """File has no source file extension."""
    return '/'.join(self.Split()[0:2])
  def IsSource(self):
    """File has a source file extension."""
    # NOTE(review): 'cu' is in _valid_extensions but not listed here --
    # presumably intentional (CUDA sources handled separately); confirm.
    return self.Extension()[1:] in ('c', 'cc', 'cpp', 'cxx')
def _ShouldPrintError(category, confidence, linenum):
  """If confidence >= verbose, category passes filter and is not suppressed."""
  # There are three ways we might decide not to print an error message:
  # a "NOLINT(category)" comment appears in the source,
  # the verbosity level isn't high enough, or the filters filter it out.
  if IsErrorSuppressedByNolint(category, linenum):
    return False
  if confidence < _cpplint_state.verbose_level:
    return False
  # Apply every filter in order; each matching +/- filter flips the state,
  # so the last matching filter effectively wins.
  is_filtered = False
  for one_filter in _Filters():
    sign, prefix = one_filter[0], one_filter[1:]
    if sign == '-':
      if category.startswith(prefix):
        is_filtered = True
    elif sign == '+':
      if category.startswith(prefix):
        is_filtered = False
    else:
      assert False  # should have been checked for in SetFilter.
  return not is_filtered
def Error(filename, linenum, category, confidence, message):
  """Logs the fact we've found a lint error.

  We log where the error was found, and also our confidence in the error,
  that is, how certain we are this is a legitimate style regression, and
  not a misidentification or a use that's sometimes justified.

  False positives can be suppressed by the use of
  "cpplint(category)" comments on the offending line.  These are
  parsed into _error_suppressions.

  Args:
    filename: The name of the file containing the error.
    linenum: The number of the line containing the error.
    category: A string used to describe the "category" this bug
      falls under: "whitespace", say, or "runtime".  Categories
      may have a hierarchy separated by slashes: "whitespace/indent".
    confidence: A number from 1-5 representing a confidence score for
      the error, with 5 meaning that we are certain of the problem,
      and 1 meaning that it could be a legitimate construct.
    message: The error message.
  """
  if not _ShouldPrintError(category, confidence, linenum):
    return
  _cpplint_state.IncrementErrorCount(category)
  # Pick the output template for the configured format; anything other
  # than vs7/eclipse falls back to the emacs-style default.
  template = {
      'vs7': '%s(%s): %s [%s] [%d]\n',
      'eclipse': '%s:%s: warning: %s [%s] [%d]\n',
  }.get(_cpplint_state.output_format, '%s:%s: %s [%s] [%d]\n')
  sys.stderr.write(template % (filename, linenum, message, category,
                               confidence))
# Matches standard C++ escape sequences per 2.13.2.3 of the C++ standard.
_RE_PATTERN_CLEANSE_LINE_ESCAPES = re.compile(
    r'\\([abfnrtv?"\\\']|\d+|x[0-9a-fA-F]+)')
# Match a single C style comment on the same line.
_RE_PATTERN_C_COMMENTS = r'/\*(?:[^*]|\*(?!/))*\*/'
# Matches multi-line C style comments.
# This RE is a little bit more complicated than one might expect, because we
# have to take care of space removals tools so we can handle comments inside
# statements better.
# The current rule is: We only clear spaces from both sides when we're at the
# end of the line. Otherwise, we try to remove spaces from the right side,
# if this doesn't work we try on left side but only if there's a non-character
# on the right.
_RE_PATTERN_CLEANSE_LINE_C_COMMENTS = re.compile(
    r'(\s*' + _RE_PATTERN_C_COMMENTS + r'\s*$|' +
    _RE_PATTERN_C_COMMENTS + r'\s+|' +
    r'\s+' + _RE_PATTERN_C_COMMENTS + r'(?=\W)|' +
    _RE_PATTERN_C_COMMENTS + r')')
def IsCppString(line):
  """Does line terminate so, that the next symbol is in string constant.

  This function does not consider single-line nor multi-line comments.

  Args:
    line: is a partial line of code starting from the 0..n.

  Returns:
    True, if next character appended to 'line' is inside a
    string constant.
  """
  # Neutralize escaped backslashes first so that \\" is not mistaken
  # for the escaped quote \".
  sanitized = line.replace(r'\\', 'XX')
  total_quotes = sanitized.count('"')
  escaped_quotes = sanitized.count(r'\"')
  char_literal_quotes = sanitized.count("'\"'")
  # An odd number of "real" double quotes means a string is still open.
  return (total_quotes - escaped_quotes - char_literal_quotes) % 2 == 1
def CleanseRawStrings(raw_lines):
  """Removes C++11 raw strings from lines.

    Before:
      static const char kData[] = R"(
          multi-line string
          )";

    After:
      static const char kData[] = ""
          (replaced by blank line)
          "";

  Args:
    raw_lines: list of raw lines.

  Returns:
    list of lines with C++11 raw strings replaced by empty strings.
  """
  # delimiter is None when outside a raw string; otherwise it holds the
  # closing sequence we are scanning for, e.g. ')foo"' for R"foo(...)foo".
  delimiter = None
  lines_without_raw_strings = []
  for line in raw_lines:
    if delimiter:
      # Inside a raw string, look for the end
      end = line.find(delimiter)
      if end >= 0:
        # Found the end of the string, match leading space for this
        # line and resume copying the original lines, and also insert
        # a "" on the last line.
        leading_space = Match(r'^(\s*)\S', line)
        line = leading_space.group(1) + '""' + line[end + len(delimiter):]
        delimiter = None
      else:
        # Haven't found the end yet, append a blank line.
        line = '""'
    # Look for beginning of a raw string, and replace them with
    # empty strings.  This is done in a loop to handle multiple raw
    # strings on the same line.
    while delimiter is None:
      # Look for beginning of a raw string.
      # See 2.14.15 [lex.string] for syntax.
      matched = Match(r'^(.*)\b(?:R|u8R|uR|UR|LR)"([^\s\\()]*)\((.*)$', line)
      if matched:
        delimiter = ')' + matched.group(2) + '"'
        end = matched.group(3).find(delimiter)
        if end >= 0:
          # Raw string ended on same line
          line = (matched.group(1) + '""' +
                  matched.group(3)[end + len(delimiter):])
          delimiter = None
        else:
          # Start of a multi-line raw string
          line = matched.group(1) + '""'
      else:
        break
    lines_without_raw_strings.append(line)
  # TODO(unknown): if delimiter is not None here, we might want to
  # emit a warning for unterminated string.
  return lines_without_raw_strings
def FindNextMultiLineCommentStart(lines, lineix):
  """Find the beginning marker for a multiline comment.

  Returns the index of the first line at or after lineix that opens a
  /* comment which does not close on the same line, or len(lines).
  """
  for ix in range(lineix, len(lines)):
    stripped = lines[ix].strip()
    # A comment that also closes on this same line does not count.
    if stripped.startswith('/*') and stripped.find('*/', 2) < 0:
      return ix
  return len(lines)
def FindNextMultiLineCommentEnd(lines, lineix):
  """We are inside a comment, find the end marker.

  Returns the index of the first line at or after lineix whose stripped
  text ends with '*/', or len(lines) if no such line exists.
  """
  for ix in range(lineix, len(lines)):
    if lines[ix].strip().endswith('*/'):
      return ix
  return len(lines)
def RemoveMultiLineCommentsFromRange(lines, begin, end):
  """Clears a range of lines for multi-line comments."""
  # Having // dummy comments makes the lines non-empty, so we will not get
  # unnecessary blank line warnings later in the code.
  lines[begin:end] = ['// dummy'] * (end - begin)
def RemoveMultiLineComments(filename, lines, error):
  """Removes multiline (c-style) comments from lines.

  Mutates lines in place, replacing every line of each multi-line comment
  with '// dummy'.  Reports an error for an unterminated comment.
  """
  lineix = 0
  while lineix < len(lines):
    begin = FindNextMultiLineCommentStart(lines, lineix)
    if begin >= len(lines):
      return  # no more multi-line comments
    end = FindNextMultiLineCommentEnd(lines, begin)
    if end >= len(lines):
      error(filename, begin + 1, 'readability/multiline_comment', 5,
            'Could not find end of multi-line comment')
      return
    RemoveMultiLineCommentsFromRange(lines, begin, end + 1)
    lineix = end + 1
def CleanseComments(line):
  """Removes //-comments and single-line C-style /* */ comments.

  Args:
    line: A line of C++ source.

  Returns:
    The line with single-line comments removed.
  """
  comment_start = line.find('//')
  # Only treat '//' as a comment when it is not inside a string literal.
  if comment_start != -1 and not IsCppString(line[:comment_start]):
    line = line[:comment_start].rstrip()
  # get rid of /* ... */
  return _RE_PATTERN_CLEANSE_LINE_C_COMMENTS.sub('', line)
class CleansedLines(object):
  """Holds 3 copies of all lines with different preprocessing applied to them.

  1) elided member contains lines without strings and comments,
  2) lines member contains lines without comments, and
  3) raw_lines member contains all the lines without processing.
  All these three members are of <type 'list'>, and of the same length.
  """
  def __init__(self, lines):
    self.elided = []
    self.lines = []
    self.raw_lines = lines
    self.num_lines = len(lines)
    # Raw strings are removed first so the per-line passes below do not
    # have to deal with multi-line string content.
    self.lines_without_raw_strings = CleanseRawStrings(lines)
    for linenum in range(len(self.lines_without_raw_strings)):
      self.lines.append(CleanseComments(
          self.lines_without_raw_strings[linenum]))
      elided = self._CollapseStrings(self.lines_without_raw_strings[linenum])
      self.elided.append(CleanseComments(elided))
  def NumLines(self):
    """Returns the number of lines represented."""
    return self.num_lines
  @staticmethod
  def _CollapseStrings(elided):
    """Collapses strings and chars on a line to simple "" or '' blocks.

    We nix strings first so we're not fooled by text like '"http://"'

    Args:
      elided: The line being processed.

    Returns:
      The line with collapsed strings.
    """
    # #include lines keep their quoted paths intact.
    if _RE_PATTERN_INCLUDE.match(elided):
      return elided
    # Remove escaped characters first to make quote/single quote collapsing
    # basic.  Things that look like escaped characters shouldn't occur
    # outside of strings and chars.
    elided = _RE_PATTERN_CLEANSE_LINE_ESCAPES.sub('', elided)
    # Replace quoted strings and digit separators.  Both single quotes
    # and double quotes are processed in the same loop, otherwise
    # nested quotes wouldn't work.
    collapsed = ''
    while True:
      # Find the first quote character
      match = Match(r'^([^\'"]*)([\'"])(.*)$', elided)
      if not match:
        collapsed += elided
        break
      head, quote, tail = match.groups()
      if quote == '"':
        # Collapse double quoted strings
        second_quote = tail.find('"')
        if second_quote >= 0:
          collapsed += head + '""'
          elided = tail[second_quote + 1:]
        else:
          # Unmatched double quote, don't bother processing the rest
          # of the line since this is probably a multiline string.
          collapsed += elided
          break
      else:
        # Found single quote, check nearby text to eliminate digit separators.
        #
        # There is no special handling for floating point here, because
        # the integer/fractional/exponent parts would all be parsed
        # correctly as long as there are digits on both sides of the
        # separator.  So we are fine as long as we don't see something
        # like "0.'3" (gcc 4.9.0 will not allow this literal).
        if Search(r'\b(?:0[bBxX]?|[1-9])[0-9a-fA-F]*$', head):
          match_literal = Match(r'^((?:\'?[0-9a-zA-Z_])*)(.*)$', "'" + tail)
          collapsed += head + match_literal.group(1).replace("'", '')
          elided = match_literal.group(2)
        else:
          second_quote = tail.find('\'')
          if second_quote >= 0:
            collapsed += head + "''"
            elided = tail[second_quote + 1:]
          else:
            # Unmatched single quote
            collapsed += elided
            break
    return collapsed
def FindEndOfExpressionInLine(line, startpos, stack):
  """Find the position just after the end of current parenthesized expression.

  Args:
    line: a CleansedLines line.
    startpos: start searching at this position.
    stack: nesting stack at startpos.

  Returns:
    On finding matching end: (index just after matching end, None)
    On finding an unclosed expression: (-1, None)
    Otherwise: (-1, new stack at end of this line)
  """
  # Use range() rather than xrange(): xrange does not exist under Python 3
  # (NameError there), and range iterates identically under Python 2.
  for i in range(startpos, len(line)):
    char = line[i]
    if char in '([{':
      # Found start of parenthesized expression, push to expression stack
      stack.append(char)
    elif char == '<':
      # Found potential start of template argument list
      if i > 0 and line[i - 1] == '<':
        # Left shift operator
        if stack and stack[-1] == '<':
          stack.pop()
          if not stack:
            return (-1, None)
      elif i > 0 and Search(r'\boperator\s*$', line[0:i]):
        # operator<, don't add to stack
        continue
      else:
        # Tentative start of template argument list
        stack.append('<')
    elif char in ')]}':
      # Found end of parenthesized expression.
      #
      # If we are currently expecting a matching '>', the pending '<'
      # must have been an operator.  Remove them from expression stack.
      while stack and stack[-1] == '<':
        stack.pop()
      if not stack:
        return (-1, None)
      if ((stack[-1] == '(' and char == ')') or
          (stack[-1] == '[' and char == ']') or
          (stack[-1] == '{' and char == '}')):
        stack.pop()
        if not stack:
          return (i + 1, None)
      else:
        # Mismatched parentheses
        return (-1, None)
    elif char == '>':
      # Found potential end of template argument list.

      # Ignore "->" and operator functions
      if (i > 0 and
          (line[i - 1] == '-' or Search(r'\boperator\s*$', line[0:i - 1]))):
        continue

      # Pop the stack if there is a matching '<'.  Otherwise, ignore
      # this '>' since it must be an operator.
      if stack:
        if stack[-1] == '<':
          stack.pop()
          if not stack:
            return (i + 1, None)
    elif char == ';':
      # Found something that look like end of statements.  If we are currently
      # expecting a '>', the matching '<' must have been an operator, since
      # template argument list should not contain statements.
      while stack and stack[-1] == '<':
        stack.pop()
      if not stack:
        return (-1, None)

  # Did not find end of expression or unbalanced parentheses on this line
  return (-1, stack)
def CloseExpression(clean_lines, linenum, pos):
  """If input points to ( or { or [ or <, finds the position that closes it.

  If lines[linenum][pos] points to a '(' or '{' or '[' or '<', finds the
  linenum/pos that correspond to the closing of the expression.

  TODO(unknown): cpplint spends a fair bit of time matching parentheses.
  Ideally we would want to index all opening and closing parentheses once
  and have CloseExpression be just a simple lookup, but due to preprocessor
  tricks, this is not so easy.

  Args:
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
    pos: A position on the line.

  Returns:
    A tuple (line, linenum, pos) pointer *past* the closing brace, or
    (line, len(lines), -1) if we never find a close.  Note we ignore
    strings and comments when matching; and the line we return is the
    'cleansed' line at linenum.
  """
  line = clean_lines.elided[linenum]
  # '<<' and '<=' are operators, not the start of a template list.
  if (line[pos] not in '({[<') or Match(r'<[<=]', line[pos:]):
    return (line, clean_lines.NumLines(), -1)

  # Scan the starting line, then keep consuming following lines until the
  # nesting stack empties out or we run out of input.
  (end_pos, stack) = FindEndOfExpressionInLine(line, pos, [])
  while True:
    if end_pos > -1:
      return (line, linenum, end_pos)
    if not stack or linenum >= clean_lines.NumLines() - 1:
      break
    linenum += 1
    line = clean_lines.elided[linenum]
    (end_pos, stack) = FindEndOfExpressionInLine(line, 0, stack)

  # Did not find end of expression before end of file, give up
  return (line, clean_lines.NumLines(), -1)
def FindStartOfExpressionInLine(line, endpos, stack):
  """Find position at the matching start of current expression.

  This is almost the reverse of FindEndOfExpressionInLine, but note
  that the input position and returned position differs by 1.

  Args:
    line: a CleansedLines line.
    endpos: start searching at this position.
    stack: nesting stack at endpos.

  Returns:
    On finding matching start: (index at matching start, None)
    On finding an unclosed expression: (-1, None)
    Otherwise: (-1, new stack at beginning of this line)
  """
  # Scan backwards from endpos toward the beginning of the line.
  i = endpos
  while i >= 0:
    char = line[i]
    if char in ')]}':
      # Found end of expression, push to expression stack
      stack.append(char)
    elif char == '>':
      # Found potential end of template argument list.
      #
      # Ignore it if it's a "->" or ">=" or "operator>"
      if (i > 0 and
          (line[i - 1] == '-' or
           Match(r'\s>=\s', line[i - 1:]) or
           Search(r'\boperator\s*$', line[0:i]))):
        # Skip the operator character as well.
        i -= 1
      else:
        stack.append('>')
    elif char == '<':
      # Found potential start of template argument list
      if i > 0 and line[i - 1] == '<':
        # Left shift operator
        i -= 1
      else:
        # If there is a matching '>', we can pop the expression stack.
        # Otherwise, ignore this '<' since it must be an operator.
        if stack and stack[-1] == '>':
          stack.pop()
          if not stack:
            return (i, None)
    elif char in '([{':
      # Found start of expression.
      #
      # If there are any unmatched '>' on the stack, they must be
      # operators.  Remove those.
      while stack and stack[-1] == '>':
        stack.pop()
      if not stack:
        return (-1, None)
      if ((char == '(' and stack[-1] == ')') or
          (char == '[' and stack[-1] == ']') or
          (char == '{' and stack[-1] == '}')):
        stack.pop()
        if not stack:
          return (i, None)
      else:
        # Mismatched parentheses
        return (-1, None)
    elif char == ';':
      # Found something that look like end of statements.  If we are currently
      # expecting a '<', the matching '>' must have been an operator, since
      # template argument list should not contain statements.
      while stack and stack[-1] == '>':
        stack.pop()
      if not stack:
        return (-1, None)
    i -= 1
  # Reached the beginning of the line without finding the start.
  return (-1, stack)
def ReverseCloseExpression(clean_lines, linenum, pos):
  """If input points to ) or } or ] or >, finds the position that opens it.

  If lines[linenum][pos] points to a ')' or '}' or ']' or '>', finds the
  linenum/pos that correspond to the opening of the expression.

  Args:
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
    pos: A position on the line.

  Returns:
    A tuple (line, linenum, pos) pointer *at* the opening brace, or
    (line, 0, -1) if we never find the matching opening brace.  Note
    we ignore strings and comments when matching; and the line we
    return is the 'cleansed' line at linenum.
  """
  line = clean_lines.elided[linenum]
  if line[pos] not in ')}]>':
    # Not sitting on a closing delimiter; nothing to match.
    return (line, 0, -1)

  # Try to resolve the expression within the current line first.
  (open_pos, pending) = FindStartOfExpressionInLine(line, pos, [])
  if open_pos > -1:
    return (line, linenum, open_pos)

  # Walk backward through earlier lines, threading the stack of still
  # unresolved closing delimiters through each call.
  while pending and linenum > 0:
    linenum -= 1
    line = clean_lines.elided[linenum]
    (open_pos, pending) = FindStartOfExpressionInLine(line, len(line) - 1,
                                                      pending)
    if open_pos > -1:
      return (line, linenum, open_pos)

  # Reached the top of the file without finding the opening delimiter.
  return (line, 0, -1)
def make_polyfill_regex():
  """Build a regex matching banned std::/boost:: names.

  MongoDB code must use the mongo::stdx:: polyfills for these names so
  that behavior is uniform across supported toolchains; this regex is
  used by CheckForMongoPolyfill to flag direct uses.

  Returns:
    A compiled regular expression matching any qualified use of a
    polyfill-required name, e.g. "std::mutex" or "boost::thread".
  """
  polyfill_required_names = [
      '_',
      'adopt_lock',
      'align',
      'async',
      'bind',
      'chrono',
      'condition_variable',
      'condition_variable_any',
      'cref',
      'cv_status',
      'defer_lock',
      'function',
      'future',
      'future_status',
      'launch',
      'lock_guard',
      'make_unique',
      'mutex',
      'packaged_task',
      'placeholders',
      'promise',
      'recursive_mutex',
      'ref',
      # BUGFIX: this entry previously read 'shared_lock,' (stray comma
      # inside the string), which produced the pattern "shared_lock,\b"
      # and silently failed to flag ordinary uses of shared_lock.
      'shared_lock',
      'shared_mutex',
      'shared_timed_mutex',
      'this_thread(?!::at_thread_exit)',
      'thread',
      'timed_mutex',
      'try_to_lock',
      'unique_lock',
      'unordered_map',
      'unordered_multimap',
      'unordered_multiset',
      'unordered_set',
  ]

  # Require the "std::"/"boost::" qualifier and a trailing word boundary
  # so that names like "std::mutexes" do not match.
  qualified_names = ['boost::' + name + "\\b" for name in polyfill_required_names]
  qualified_names.extend('std::' + name + "\\b" for name in polyfill_required_names)
  qualified_names_regex = '|'.join(qualified_names)
  return re.compile(qualified_names_regex)
_RE_PATTERN_MONGO_POLYFILL = make_polyfill_regex()
def CheckForMongoPolyfill(filename, clean_lines, linenum, error):
  """Flags direct use of std::/boost:: names that have mongo::stdx polyfills.

  Args:
    filename: The name of the current file.
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
    error: The function to call with any errors found.
  """
  if _RE_PATTERN_MONGO_POLYFILL.search(clean_lines.elided[linenum]):
    error(filename, linenum, 'mongodb/polyfill', 5,
          'Illegal use of banned name from std::/boost::, use mongo::stdx:: variant instead')
def CheckForMongoAtomic(filename, clean_lines, linenum, error):
  """Flags any use of std::atomic, which is banned in MongoDB code.

  Args:
    filename: The name of the current file.
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
    error: The function to call with any errors found.
  """
  # The banned name is a fixed literal, so a substring test suffices.
  if 'std::atomic' in clean_lines.elided[linenum]:
    error(filename, linenum, 'mongodb/stdatomic', 5,
          'Illegal use of prohibited std::atomic<T>, use AtomicWord<T> or other types '
          'from "mongo/platform/atomic_word.h"')
def CheckForCopyright(filename, lines, error):
  """Logs an error if no Copyright message appears at the top of the file.

  Args:
    filename: The name of the current file.
    lines: An array of strings, each representing a line of the file.
    error: The function to call with any errors found.
  """
  # We'll say it should occur by line 10.  Don't forget there's a
  # dummy line at the front.
  #
  # BUGFIX: use 'range' instead of the Python-2-only 'xrange' so the
  # checker also runs under Python 3; the scanned window is at most 10
  # lines, so the eager list on Python 2 is negligible.
  for line in range(1, min(len(lines), 11)):
    if re.search(r'Copyright', lines[line], re.I):
      break
  else:                       # means no copyright line was found
    error(filename, 0, 'legal/copyright', 5,
          'No copyright message found. '
          'You should have a line: "Copyright [year] <Copyright Owner>"')
def GetIndentLevel(line):
  """Return the number of leading spaces in line.

  Args:
    line: A string to check.

  Returns:
    An integer count of leading spaces, possibly zero.
  """
  # A whitespace-only line has no indent level: the pattern requires a
  # non-space character after the run of spaces.
  leading = Match(r'^( *)\S', line)
  return len(leading.group(1)) if leading else 0
def GetHeaderGuardCPPVariable(filename):
  """Returns the CPP variable that should be used as a header guard.

  Args:
    filename: The name of a C++ header file.

  Returns:
    The CPP variable that should be used as a header guard in the
    named file.
  """
  # Restores original filename in case that cpplint is invoked from Emacs's
  # flymake.
  filename = re.sub(r'_flymake\.h$', '.h', filename)
  filename = re.sub(r'/\.flymake/([^/]*)$', r'/\1', filename)

  file_path_from_root = FileInfo(filename).RepositoryName()
  if _root:
    # Drop the configured root prefix so the guard is root-relative.
    file_path_from_root = re.sub('^' + _root + os.sep, '', file_path_from_root)
  # Map separators/punctuation to '_' and upper-case the whole path.
  return re.sub(r'[-./\s]', '_', file_path_from_root).upper() + '_'
def CheckForHeaderGuard(filename, lines, error):
  """Checks that the file contains a header guard.

  Logs an error if no #ifndef header guard is present.  For other
  headers, checks that the full pathname is used.

  Args:
    filename: The name of the C++ header file.
    lines: An array of strings, each representing a line of the file.
    error: The function to call with any errors found.
  """
  # Don't check for header guards if there are error suppression
  # comments somewhere in this file.
  #
  # Because this is silencing a warning for a nonexistent line, we
  # only support the very specific NOLINT(build/header_guard) syntax,
  # and not the general NOLINT or NOLINT(*) syntax.
  for i in lines:
    if Search(r'//\s*NOLINT\(build/header_guard\)', i):
      return

  cppvar = GetHeaderGuardCPPVariable(filename)

  ifndef = None            # guard symbol named on the first '#ifndef'
  ifndef_linenum = 0
  define = None            # symbol named on the first '#define'
  endif = None             # full text of the last '#endif' line
  endif_linenum = 0
  for linenum, line in enumerate(lines):
    linesplit = line.split()
    if len(linesplit) >= 2:
      # find the first occurrence of #ifndef and #define, save arg
      if not ifndef and linesplit[0] == '#ifndef':
        # set ifndef to the header guard presented on the #ifndef line.
        ifndef = linesplit[1]
        ifndef_linenum = linenum
      if not define and linesplit[0] == '#define':
        define = linesplit[1]
    # find the last occurrence of #endif, save entire line
    if line.startswith('#endif'):
      endif = line
      endif_linenum = linenum

  if not ifndef:
    error(filename, 0, 'build/header_guard', 5,
          'No #ifndef header guard found, suggested CPP variable is: %s' %
          cppvar)
    return

  if not define:
    error(filename, 0, 'build/header_guard', 5,
          'No #define header guard found, suggested CPP variable is: %s' %
          cppvar)
    return

  # The guard should be PATH_FILE_H_, but we also allow PATH_FILE_H__
  # for backward compatibility.
  if ifndef != cppvar:
    error_level = 0
    if ifndef != cppvar + '_':
      error_level = 5

    # NOLINT comments on the #ifndef line itself still apply here.
    ParseNolintSuppressions(filename, lines[ifndef_linenum], ifndef_linenum,
                            error)
    error(filename, ifndef_linenum, 'build/header_guard', error_level,
          '#ifndef header guard has wrong style, please use: %s' % cppvar)

  if define != ifndef:
    error(filename, 0, 'build/header_guard', 5,
          '#ifndef and #define don\'t match, suggested CPP variable is: %s' %
          cppvar)
    return

  if endif != ('#endif // %s' % cppvar):
    error_level = 0
    if endif != ('#endif // %s' % (cppvar + '_')):
      error_level = 5

      # NOLINT comments on the #endif line itself still apply here.
      ParseNolintSuppressions(filename, lines[endif_linenum], endif_linenum,
                              error)
    error(filename, endif_linenum, 'build/header_guard', error_level,
          '#endif line should be "#endif // %s"' % cppvar)
def CheckForBadCharacters(filename, lines, error):
  """Logs an error for each line containing bad characters.

  Two kinds of bad characters:

  1. Unicode replacement characters: These indicate that either the file
  contained invalid UTF-8 (likely) or Unicode replacement characters (which
  it shouldn't).  Note that it's possible for this to throw off line
  numbering if the invalid UTF-8 occurred adjacent to a newline.

  2. NUL bytes.  These are problematic for some tools.

  Args:
    filename: The name of the current file.
    lines: An array of strings, each representing a line of the file.
    error: The function to call with any errors found.
  """
  for linenum, line in enumerate(lines):
    problems = []
    # u'\ufffd' is the Unicode replacement character, which the decoder
    # substitutes for byte sequences that are not valid UTF-8.
    if u'\ufffd' in line:
      problems.append(
          ('readability/utf8',
           'Line contains invalid UTF-8 (or Unicode replacement character).'))
    if '\0' in line:
      problems.append(('readability/nul', 'Line contains NUL byte.'))
    for category, message in problems:
      error(filename, linenum, category, 5, message)
def CheckForNewlineAtEOF(filename, lines, error):
  """Logs an error if there is no newline char at the end of the file.

  Args:
    filename: The name of the current file.
    lines: An array of strings, each representing a line of the file.
    error: The function to call with any errors found.
  """
  # The array lines() was created by adding two newlines to the
  # original file (go figure), then splitting on \n.  A file ending in
  # a newline therefore yields an empty last-but-two element; anything
  # else means the final line was unterminated.
  ends_with_newline = len(lines) >= 3 and not lines[-2]
  if not ends_with_newline:
    error(filename, len(lines) - 2, 'whitespace/ending_newline', 5,
          'Could not find a newline character at the end of the file.')
def CheckForMultilineCommentsAndStrings(filename, clean_lines, linenum, error):
  """Logs an error if we see /* ... */ or "..." that extend past one line.

  /* ... */ comments are legit inside macros, for one line.
  Otherwise, we prefer // comments, so it's ok to warn about the
  other.  Likewise, it's ok for strings to extend across multiple
  lines, as long as a line continuation character (backslash)
  terminates each line.  Although not currently prohibited by the C++
  style guide, it's ugly and unnecessary.  We don't do well with either
  in this lint program, so we warn about both.

  Args:
    filename: The name of the current file.
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
    error: The function to call with any errors found.
  """
  # Strip escaped backslashes first: the second (escaped) slash of a
  # '\\' would otherwise make a following '"' look escaped.
  line = clean_lines.elided[linenum].replace('\\\\', '')

  if line.count('/*') > line.count('*/'):
    error(filename, linenum, 'readability/multiline_comment', 5,
          'Complex multi-line /*...*/-style comment found. '
          'Lint may give bogus warnings. '
          'Consider replacing these with //-style comments, '
          'with #if 0...#endif, '
          'or with more clearly structured multi-line comments.')

  # An odd number of unescaped quotes means a string is left open.
  unescaped_quotes = line.count('"') - line.count('\\"')
  if unescaped_quotes % 2:
    error(filename, linenum, 'readability/multiline_string', 5,
          'Multi-line string ("...") found. This lint script doesn\'t '
          'do well with such strings, and may give bogus warnings. '
          'Use C++11 raw strings or concatenation instead.')
# (non-threadsafe name, thread-safe alternative, validation pattern)
#
# The validation pattern is used to eliminate false positives such as:
#  _rand();               // false positive due to substring match.
#  ->rand();              // some member function rand().
#  ACMRandom rand(seed);  // some variable named rand.
#  ISAACRandom rand();    // another variable named rand.
#
# Basically we require the return value of these functions to be used
# in some expression context on the same line by matching on some
# operator before the function name.  This eliminates constructors and
# member function calls.
#
# The prefix matches a single-character operator optionally followed by
# spaces, or '>' followed by at least one space (a bare '>' would also
# hit the tail of '->', which is a member access, not a free call).
_UNSAFE_FUNC_PREFIX = r'(?:[-+*/=%^&|(<]\s*|>\s+)'

# Each entry: (unsafe name, reentrant '_r' replacement, full match pattern).
_THREADING_LIST = (
    ('asctime(', 'asctime_r(', _UNSAFE_FUNC_PREFIX + r'asctime\([^)]+\)'),
    ('ctime(', 'ctime_r(', _UNSAFE_FUNC_PREFIX + r'ctime\([^)]+\)'),
    ('getgrgid(', 'getgrgid_r(', _UNSAFE_FUNC_PREFIX + r'getgrgid\([^)]+\)'),
    ('getgrnam(', 'getgrnam_r(', _UNSAFE_FUNC_PREFIX + r'getgrnam\([^)]+\)'),
    ('getlogin(', 'getlogin_r(', _UNSAFE_FUNC_PREFIX + r'getlogin\(\)'),
    ('getpwnam(', 'getpwnam_r(', _UNSAFE_FUNC_PREFIX + r'getpwnam\([^)]+\)'),
    ('getpwuid(', 'getpwuid_r(', _UNSAFE_FUNC_PREFIX + r'getpwuid\([^)]+\)'),
    ('gmtime(', 'gmtime_r(', _UNSAFE_FUNC_PREFIX + r'gmtime\([^)]+\)'),
    ('localtime(', 'localtime_r(', _UNSAFE_FUNC_PREFIX + r'localtime\([^)]+\)'),
    ('rand(', 'rand_r(', _UNSAFE_FUNC_PREFIX + r'rand\(\)'),
    ('strtok(', 'strtok_r(',
     _UNSAFE_FUNC_PREFIX + r'strtok\([^)]+\)'),
    ('ttyname(', 'ttyname_r(', _UNSAFE_FUNC_PREFIX + r'ttyname\([^)]+\)'),
    )
def CheckPosixThreading(filename, clean_lines, linenum, error):
  """Checks for calls to thread-unsafe functions.

  Much code has been originally written without consideration of
  multi-threading.  Also, engineers are relying on their old experience;
  they have learned posix before threading extensions were added.  These
  tests guide the engineers to use thread-safe functions (when using
  posix directly).

  Args:
    filename: The name of the current file.
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
    error: The function to call with any errors found.
  """
  line = clean_lines.elided[linenum]
  for unsafe_func, safe_func, validation_pattern in _THREADING_LIST:
    # The validation pattern requires an operator before the name, which
    # filters out constructors, member calls, and substring matches.
    if Search(validation_pattern, line):
      error(filename, linenum, 'runtime/threadsafe_fn', 2,
            'Consider using ' + safe_func +
            '...) instead of ' + unsafe_func +
            '...) for improved thread safety.')
def CheckVlogArguments(filename, clean_lines, linenum, error):
  """Checks that VLOG() is only used for defining a logging level.

  For example, VLOG(2) is correct.  VLOG(INFO), VLOG(WARNING), VLOG(ERROR),
  and VLOG(FATAL) are not.

  Args:
    filename: The name of the current file.
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
    error: The function to call with any errors found.
  """
  # Any symbolic severity inside VLOG(...) is a misuse of the macro.
  symbolic_vlog = Search(r'\bVLOG\((INFO|ERROR|WARNING|DFATAL|FATAL)\)',
                         clean_lines.elided[linenum])
  if symbolic_vlog:
    error(filename, linenum, 'runtime/vlog', 5,
          'VLOG() should be used with numeric verbosity level. '
          'Use LOG() if you want symbolic severity levels.')
# Matches invalid increment: *count++, which moves pointer instead of
# incrementing a value.
_RE_PATTERN_INVALID_INCREMENT = re.compile(
    r'^\s*\*\w+(\+\+|--);')


def CheckInvalidIncrement(filename, clean_lines, linenum, error):
  """Checks for invalid increment *count++.

  For example following function:
  void increment_counter(int* count) {
    *count++;
  }
  is invalid, because it effectively does count++, moving pointer, and should
  be replaced with ++*count, (*count)++ or *count += 1.

  Args:
    filename: The name of the current file.
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
    error: The function to call with any errors found.
  """
  if _RE_PATTERN_INVALID_INCREMENT.match(clean_lines.elided[linenum]):
    error(filename, linenum, 'runtime/invalid_increment', 5,
          'Changing pointer instead of value (or unused value of operator*).')
def IsMacroDefinition(clean_lines, linenum):
  """Returns True if this line starts or continues a #define.

  A line continues a macro definition when the previous line ends with a
  backslash continuation.

  Args:
    clean_lines: list of cleansed source lines.
    linenum: index of the line to check.

  Returns:
    True if the line is part of a macro definition, False otherwise.
  """
  if Search(r'^#define', clean_lines[linenum]):
    return True
  return bool(linenum > 0 and Search(r'\\$', clean_lines[linenum - 1]))
def IsForwardClassDeclaration(clean_lines, linenum):
  """Returns a truthy match if the line is only a forward class declaration."""
  # Matches e.g. "class Foo;" or "template <typename T> class Foo;".
  forward_decl_pattern = r'^\s*(\btemplate\b)*.*class\s+\w+;\s*$'
  return Match(forward_decl_pattern, clean_lines[linenum])
class _BlockInfo(object):
  """Stores information about a generic block of code."""

  def __init__(self, seen_open_brace):
    # True once the block's opening '{' has been consumed.
    self.seen_open_brace = seen_open_brace
    # Running '(' minus ')' balance inside the block.
    self.open_parentheses = 0
    # Inline-assembly state for this block (one of the *_ASM constants).
    self.inline_asm = _NO_ASM
    # Derived classes opt in to namespace-indentation checking.
    self.check_namespace_indentation = False

  def CheckBegin(self, filename, clean_lines, linenum, error):
    """Run checks that applies to text up to the opening brace.

    This is mostly for checking the text after the class identifier
    and the "{", usually where the base class is specified.  For other
    blocks, there isn't much to check, so we always pass.

    Args:
      filename: The name of the current file.
      clean_lines: A CleansedLines instance containing the file.
      linenum: The number of the line to check.
      error: The function to call with any errors found.
    """
    pass

  def CheckEnd(self, filename, clean_lines, linenum, error):
    """Run checks that applies to text after the closing brace.

    This is mostly used for checking end of namespace comments.

    Args:
      filename: The name of the current file.
      clean_lines: A CleansedLines instance containing the file.
      linenum: The number of the line to check.
      error: The function to call with any errors found.
    """
    pass

  def IsBlockInfo(self):
    """Returns true if this block is a _BlockInfo.

    This is convenient for verifying that an object is an instance of
    a _BlockInfo, but not an instance of any of the derived classes.

    Returns:
      True for this class, False for derived classes.
    """
    return type(self) is _BlockInfo
class _ExternCInfo(_BlockInfo):
  """Stores information about an 'extern "C"' block."""

  def __init__(self):
    # The head is treated as already complete: an extern "C" block has
    # no class/namespace-style begin or end checks of its own.
    super(_ExternCInfo, self).__init__(True)
class _ClassInfo(_BlockInfo):
  """Stores information about a class or struct declaration."""

  def __init__(self, name, class_or_struct, clean_lines, linenum):
    _BlockInfo.__init__(self, False)
    self.name = name
    self.starting_linenum = linenum
    self.is_derived = False          # set by CheckBegin on a base-class list
    self.check_namespace_indentation = True
    if class_or_struct == 'struct':
      self.access = 'public'
      self.is_struct = True
    else:
      self.access = 'private'
      self.is_struct = False

    # Remember initial indentation level for this class.  Using raw_lines here
    # instead of elided to account for leading comments.
    self.class_indent = GetIndentLevel(clean_lines.raw_lines[linenum])

    # Try to find the end of the class.  This will be confused by things like:
    #   class A {
    #   } *x = { ...
    #
    # But it's still good enough for CheckSectionSpacing.
    self.last_line = 0
    depth = 0
    for i in range(linenum, clean_lines.NumLines()):
      line = clean_lines.elided[i]
      depth += line.count('{') - line.count('}')
      if not depth:
        self.last_line = i
        break

  def CheckBegin(self, filename, clean_lines, linenum, error):
    # Look for a bare ':' (not '::') before the opening brace -- its
    # presence means a base-class list follows.
    if Search('(^|[^:]):($|[^:])', clean_lines.elided[linenum]):
      self.is_derived = True

  def CheckEnd(self, filename, clean_lines, linenum, error):
    # Check that closing brace is aligned with beginning of the class.
    # Only do this if the closing brace is indented by only whitespaces.
    # This means we will not check single-line class definitions.
    indent = Match(r'^( *)\}', clean_lines.elided[linenum])
    if indent and len(indent.group(1)) != self.class_indent:
      if self.is_struct:
        parent = 'struct ' + self.name
      else:
        parent = 'class ' + self.name
      error(filename, linenum, 'whitespace/indent', 3,
            'Closing brace should be aligned with beginning of %s' % parent)
class _NamespaceInfo(_BlockInfo):
  """Stores information about a namespace."""

  def __init__(self, name, linenum):
    _BlockInfo.__init__(self, False)
    self.name = name or ''           # '' for anonymous namespaces
    self.starting_linenum = linenum
    self.check_namespace_indentation = True

  def CheckEnd(self, filename, clean_lines, linenum, error):
    """Check end of namespace comments."""
    line = clean_lines.raw_lines[linenum]

    # Check how many lines is enclosed in this namespace.  Don't issue
    # warning for missing namespace comments if there aren't enough
    # lines.  However, do apply checks if there is already an end of
    # namespace comment and it's incorrect.
    #
    # TODO(unknown): We always want to check end of namespace comments
    # if a namespace is large, but sometimes we also want to apply the
    # check if a short namespace contained nontrivial things (something
    # other than forward declarations).  There is currently no logic on
    # deciding what these nontrivial things are, so this check is
    # triggered by namespace size only, which works most of the time.
    if (linenum - self.starting_linenum < 10
        and not Match(r'};*\s*(//|/\*).*\bnamespace\b', line)):
      return

    # Look for matching comment at end of namespace.
    #
    # Note that we accept C style "/* */" comments for terminating
    # namespaces, so that code that terminate namespaces inside
    # preprocessor macros can be cpplint clean.
    #
    # We also accept stuff like "// end of namespace <name>." with the
    # period at the end.
    #
    # Besides these, we don't accept anything else, otherwise we might
    # get false negatives when existing comment is a substring of the
    # expected namespace.
    if self.name:
      # Named namespace
      if not Match((r'};*\s*(//|/\*).*\bnamespace\s+' + re.escape(self.name) +
                    r'[\*/\.\\\s]*$'),
                   line):
        error(filename, linenum, 'readability/namespace', 5,
              'Namespace should be terminated with "// namespace %s"' %
              self.name)
    else:
      # Anonymous namespace
      if not Match(r'};*\s*(//|/\*).*\bnamespace[\*/\.\\\s]*$', line):
        # If "// namespace anonymous" or "// anonymous namespace (more text)",
        # mention "// anonymous namespace" as an acceptable form
        if Match(r'}.*\b(namespace anonymous|anonymous namespace)\b', line):
          error(filename, linenum, 'readability/namespace', 5,
                'Anonymous namespace should be terminated with "// namespace"'
                ' or "// anonymous namespace"')
        else:
          error(filename, linenum, 'readability/namespace', 5,
                'Anonymous namespace should be terminated with "// namespace"')
class _PreprocessorInfo(object):
  """Stores checkpoints of nesting stacks when #if/#else is seen."""

  def __init__(self, stack_before_if):
    # Snapshot of the entire nesting stack taken at the '#if'.
    self.stack_before_if = stack_before_if
    # Snapshot of the nesting stack taken when '#else'/'#elif' is
    # reached; populated lazily, only once such a branch is seen.
    self.stack_before_else = []
    # True once the '#else' or '#elif' branch of this '#if' was seen.
    self.seen_else = False
class NestingState(object):
  """Holds states related to parsing braces.

  Tracks the current stack of nested blocks (classes, namespaces,
  generic braces) as lines are fed to Update(), including the
  interaction with preprocessor #if/#else/#endif branches.
  """

  def __init__(self):
    # Stack for tracking all braces.  An object is pushed whenever we
    # see a "{", and popped when we see a "}".  Only 3 types of
    # objects are possible:
    # - _ClassInfo: a class or struct.
    # - _NamespaceInfo: a namespace.
    # - _BlockInfo: some other type of block.
    self.stack = []

    # Top of the previous stack before each Update().
    #
    # Because the nesting_stack is updated at the end of each line, we
    # had to do some convoluted checks to find out what is the current
    # scope at the beginning of the line.  This check is simplified by
    # saving the previous top of nesting stack.
    #
    # We could save the full stack, but we only need the top.  Copying
    # the full nesting stack would slow down cpplint by ~10%.
    self.previous_stack_top = []

    # Stack of _PreprocessorInfo objects.
    self.pp_stack = []

  def SeenOpenBrace(self):
    """Check if we have seen the opening brace for the innermost block.

    Returns:
      True if we have seen the opening brace, False if the innermost
      block is still expecting an opening brace.
    """
    return (not self.stack) or self.stack[-1].seen_open_brace

  def InNamespaceBody(self):
    """Check if we are currently one level inside a namespace body.

    Returns:
      True if top of the stack is a namespace block, False otherwise.
    """
    return self.stack and isinstance(self.stack[-1], _NamespaceInfo)

  def InExternC(self):
    """Check if we are currently one level inside an 'extern "C"' block.

    Returns:
      True if top of the stack is an extern block, False otherwise.
    """
    return self.stack and isinstance(self.stack[-1], _ExternCInfo)

  def InClassDeclaration(self):
    """Check if we are currently one level inside a class or struct declaration.

    Returns:
      True if top of the stack is a class/struct, False otherwise.
    """
    return self.stack and isinstance(self.stack[-1], _ClassInfo)

  def InAsmBlock(self):
    """Check if we are currently one level inside an inline ASM block.

    Returns:
      True if the top of the stack is a block containing inline ASM.
    """
    return self.stack and self.stack[-1].inline_asm != _NO_ASM

  def InTemplateArgumentList(self, clean_lines, linenum, pos):
    """Check if current position is inside template argument list.

    Args:
      clean_lines: A CleansedLines instance containing the file.
      linenum: The number of the line to check.
      pos: position just after the suspected template argument.

    Returns:
      True if (linenum, pos) is inside template arguments.
    """
    while linenum < clean_lines.NumLines():
      # Find the earliest character that might indicate a template argument
      line = clean_lines.elided[linenum]
      match = Match(r'^[^{};=\[\]\.<>]*(.)', line[pos:])
      if not match:
        linenum += 1
        pos = 0
        continue
      token = match.group(1)
      pos += len(match.group(0))
      # These things do not look like template argument list:
      #   class Suspect {
      #   class Suspect x; }
      if token in ('{', '}', ';'): return False
      # These things look like template argument list:
      #   template <class Suspect>
      #   template <class Suspect = default_value>
      #   template <class Suspect[]>
      #   template <class Suspect...>
      if token in ('>', '=', '[', ']', '.'): return True
      # Check if token is an unmatched '<'.
      # If not, move on to the next character.
      if token != '<':
        pos += 1
        if pos >= len(line):
          linenum += 1
          pos = 0
        continue
      # We can't be sure if we just find a single '<', and need to
      # find the matching '>'.
      (_, end_line, end_pos) = CloseExpression(clean_lines, linenum, pos - 1)
      if end_pos < 0:
        # Not sure if template argument list or syntax error in file
        return False
      linenum = end_line
      pos = end_pos
    return False

  def UpdatePreprocessor(self, line):
    """Update preprocessor stack.

    We need to handle preprocessors due to classes like this:
      #ifdef SWIG
      struct ResultDetailsPageElementExtensionPoint {
      #else
      struct ResultDetailsPageElementExtensionPoint : public Extension {
      #endif

    We make the following assumptions (good enough for most files):
    - Preprocessor condition evaluates to true from #if up to first
      #else/#elif/#endif.
    - Preprocessor condition evaluates to false from #else/#elif up
      to #endif.  We still perform lint checks on these lines, but
      these do not affect nesting stack.

    Args:
      line: current line to check.
    """
    if Match(r'^\s*#\s*(if|ifdef|ifndef)\b', line):
      # Beginning of #if block, save the nesting stack here.  The saved
      # stack will allow us to restore the parsing state in the #else case.
      self.pp_stack.append(_PreprocessorInfo(copy.deepcopy(self.stack)))
    elif Match(r'^\s*#\s*(else|elif)\b', line):
      # Beginning of #else block
      if self.pp_stack:
        if not self.pp_stack[-1].seen_else:
          # This is the first #else or #elif block.  Remember the
          # whole nesting stack up to this point.  This is what we
          # keep after the #endif.
          self.pp_stack[-1].seen_else = True
          self.pp_stack[-1].stack_before_else = copy.deepcopy(self.stack)

        # Restore the stack to how it was before the #if
        self.stack = copy.deepcopy(self.pp_stack[-1].stack_before_if)
      else:
        # TODO(unknown): unexpected #else, issue warning?
        pass
    elif Match(r'^\s*#\s*endif\b', line):
      # End of #if or #else blocks.
      if self.pp_stack:
        # If we saw an #else, we will need to restore the nesting
        # stack to its former state before the #else, otherwise we
        # will just continue from where we left off.
        if self.pp_stack[-1].seen_else:
          # Here we can just use a shallow copy since we are the last
          # reference to it.
          self.stack = self.pp_stack[-1].stack_before_else
        # Drop the corresponding #if
        self.pp_stack.pop()
      else:
        # TODO(unknown): unexpected #endif, issue warning?
        pass

  # TODO(unknown): Update() is too long, but we will refactor later.
  def Update(self, filename, clean_lines, linenum, error):
    """Update nesting state with current line.

    Args:
      filename: The name of the current file.
      clean_lines: A CleansedLines instance containing the file.
      linenum: The number of the line to check.
      error: The function to call with any errors found.
    """
    line = clean_lines.elided[linenum]

    # Remember top of the previous nesting stack.
    #
    # The stack is always pushed/popped and not modified in place, so
    # we can just do a shallow copy instead of copy.deepcopy.  Using
    # deepcopy would slow down cpplint by ~28%.
    if self.stack:
      self.previous_stack_top = self.stack[-1]
    else:
      self.previous_stack_top = None

    # Update pp_stack
    self.UpdatePreprocessor(line)

    # Count parentheses.  This is to avoid adding struct arguments to
    # the nesting stack.
    if self.stack:
      inner_block = self.stack[-1]
      depth_change = line.count('(') - line.count(')')
      inner_block.open_parentheses += depth_change

      # Also check if we are starting or ending an inline assembly block.
      if inner_block.inline_asm in (_NO_ASM, _END_ASM):
        if (depth_change != 0 and
            inner_block.open_parentheses == 1 and
            _MATCH_ASM.match(line)):
          # Enter assembly block
          inner_block.inline_asm = _INSIDE_ASM
        else:
          # Not entering assembly block.  If previous line was _END_ASM,
          # we will now shift to _NO_ASM state.
          inner_block.inline_asm = _NO_ASM
      elif (inner_block.inline_asm == _INSIDE_ASM and
            inner_block.open_parentheses == 0):
        # Exit assembly block
        inner_block.inline_asm = _END_ASM

    # Consume namespace declaration at the beginning of the line.  Do
    # this in a loop so that we catch same line declarations like this:
    #   namespace proto2 { namespace bridge { class MessageSet; } }
    while True:
      # Match start of namespace.  The "\b\s*" below catches namespace
      # declarations even if it weren't followed by a whitespace, this
      # is so that we don't confuse our namespace checker.  The
      # missing spaces will be flagged by CheckSpacing.
      namespace_decl_match = Match(r'^\s*namespace\b\s*([:\w]+)?(.*)$', line)
      if not namespace_decl_match:
        break

      new_namespace = _NamespaceInfo(namespace_decl_match.group(1), linenum)
      self.stack.append(new_namespace)

      line = namespace_decl_match.group(2)
      if line.find('{') != -1:
        new_namespace.seen_open_brace = True
        line = line[line.find('{') + 1:]

    # Look for a class declaration in whatever is left of the line
    # after parsing namespaces.  The regexp accounts for decorated classes
    # such as in:
    #   class LOCKABLE API Object {
    #   };
    class_decl_match = Match(
        r'^(\s*(?:template\s*<[\w\s<>,:]*>\s*)?'
        r'(class|struct)\s+(?:[A-Z_]+\s+)*(\w+(?:::\w+)*))'
        r'(.*)$', line)
    if (class_decl_match and
        (not self.stack or self.stack[-1].open_parentheses == 0)):
      # We do not want to accept classes that are actually template arguments:
      #   template <class Ignore1,
      #             class Ignore2 = Default<Args>,
      #             template <Args> class Ignore3>
      #   void Function() {};
      #
      # To avoid template argument cases, we scan forward and look for
      # an unmatched '>'.  If we see one, assume we are inside a
      # template argument list.
      end_declaration = len(class_decl_match.group(1))
      if not self.InTemplateArgumentList(clean_lines, linenum, end_declaration):
        self.stack.append(_ClassInfo(
            class_decl_match.group(3), class_decl_match.group(2),
            clean_lines, linenum))
        line = class_decl_match.group(4)

    # If we have not yet seen the opening brace for the innermost block,
    # run checks here.
    if not self.SeenOpenBrace():
      self.stack[-1].CheckBegin(filename, clean_lines, linenum, error)

    # Update access control if we are inside a class/struct
    if self.stack and isinstance(self.stack[-1], _ClassInfo):
      classinfo = self.stack[-1]
      access_match = Match(
          r'^(.*)\b(public|private|protected|signals)(\s+(?:slots\s*)?)?'
          r':(?:[^:]|$)',
          line)
      if access_match:
        classinfo.access = access_match.group(2)

        # Check that access keywords are indented +1 space.  Skip this
        # check if the keywords are not preceded by whitespaces.
        indent = access_match.group(1)
        if (len(indent) != classinfo.class_indent + 1 and
            Match(r'^\s*$', indent)):
          if classinfo.is_struct:
            parent = 'struct ' + classinfo.name
          else:
            parent = 'class ' + classinfo.name
          slots = ''
          if access_match.group(3):
            slots = access_match.group(3)
          error(filename, linenum, 'whitespace/indent', 3,
                '%s%s: should be indented +1 space inside %s' % (
                    access_match.group(2), slots, parent))

    # Consume braces or semicolons from what's left of the line
    while True:
      # Match first brace, semicolon, or closed parenthesis.
      matched = Match(r'^[^{;)}]*([{;)}])(.*)$', line)
      if not matched:
        break

      token = matched.group(1)
      if token == '{':
        # If namespace or class hasn't seen a opening brace yet, mark
        # namespace/class head as complete.  Push a new block onto the
        # stack otherwise.
        if not self.SeenOpenBrace():
          self.stack[-1].seen_open_brace = True
        elif Match(r'^extern\s*"[^"]*"\s*\{', line):
          self.stack.append(_ExternCInfo())
        else:
          self.stack.append(_BlockInfo(True))
          if _MATCH_ASM.match(line):
            self.stack[-1].inline_asm = _BLOCK_ASM
      elif token == ';' or token == ')':
        # If we haven't seen an opening brace yet, but we already saw
        # a semicolon, this is probably a forward declaration.  Pop
        # the stack for these.
        #
        # Similarly, if we haven't seen an opening brace yet, but we
        # already saw a closing parenthesis, then these are probably
        # function arguments with extra "class" or "struct" keywords.
        # Also pop these stack for these.
        if not self.SeenOpenBrace():
          self.stack.pop()
      else:  # token == '}'
        # Perform end of block checks and pop the stack.
        if self.stack:
          self.stack[-1].CheckEnd(filename, clean_lines, linenum, error)
          self.stack.pop()
      line = matched.group(2)

  def InnermostClass(self):
    """Get class info on the top of the stack.

    Returns:
      A _ClassInfo object if we are inside a class, or None otherwise.
    """
    for i in range(len(self.stack), 0, -1):
      classinfo = self.stack[i - 1]
      if isinstance(classinfo, _ClassInfo):
        return classinfo
    return None

  def CheckCompletedBlocks(self, filename, error):
    """Checks that all classes and namespaces have been completely parsed.

    Call this when all lines in a file have been processed.

    Args:
      filename: The name of the current file.
      error: The function to call with any errors found.
    """
    # Note: This test can result in false positives if #ifdef constructs
    # get in the way of brace matching.  See the testBuildClass test in
    # cpplint_unittest.py for an example of this.
    for obj in self.stack:
      if isinstance(obj, _ClassInfo):
        error(filename, obj.starting_linenum, 'build/class', 5,
              'Failed to find complete declaration of class %s' %
              obj.name)
      elif isinstance(obj, _NamespaceInfo):
        error(filename, obj.starting_linenum, 'build/namespaces', 5,
              'Failed to find complete declaration of namespace %s' %
              obj.name)
def CheckForNonStandardConstructs(filename, clean_lines, linenum,
                                  nesting_state, error):
  r"""Logs an error if we see certain non-ANSI constructs ignored by gcc-2.
  Complain about several constructs which gcc-2 accepts, but which are
  not standard C++. Warning about these in lint is one way to ease the
  transition to new compilers.
  - put storage class first (e.g. "static const" instead of "const static").
  - "%lld" instead of %qd" in printf-type functions.
  - "%1$d" is non-standard in printf-type functions.
  - "\%" is an undefined character escape sequence.
  - text after #endif is not allowed.
  - invalid inner-style forward declaration.
  - >? and <? operators, and their >?= and <?= cousins.
  Additionally, check for constructor/destructor style violations and reference
  members, as it is very convenient to do so while checking for
  gcc-2 compliance.
  Args:
    filename: The name of the current file.
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
    nesting_state: A NestingState instance which maintains information about
      the current stack of nested blocks being parsed.
    error: A callable to which errors are reported, which takes 4 arguments:
           filename, line number, error level, and message
  """
  # Remove comments from the line, but leave in strings for now.
  line = clean_lines.lines[linenum]
  if Search(r'printf\s*\(.*".*%[-+ ]?\d*q', line):
    error(filename, linenum, 'runtime/printf_format', 3,
          '%q in format strings is deprecated. Use %ll instead.')
  if Search(r'printf\s*\(.*".*%\d+\$', line):
    error(filename, linenum, 'runtime/printf_format', 2,
          '%N$ formats are unconventional. Try rewriting to avoid them.')
  # Remove escaped backslashes before looking for undefined escapes.
  line = line.replace('\\\\', '')
  if Search(r'("|\').*\\(%|\[|\(|{)', line):
    error(filename, linenum, 'build/printf_format', 3,
          '%, [, (, and { are undefined character escapes. Unescape them.')
  # For the rest, work with both comments and strings removed.
  line = clean_lines.elided[linenum]
  if Search(r'\b(const|volatile|void|char|short|int|long'
            r'|float|double|signed|unsigned'
            r'|schar|u?int8|u?int16|u?int32|u?int64)'
            r'\s+(register|static|extern|typedef)\b',
            line):
    error(filename, linenum, 'build/storage_class', 5,
          'Storage class (static, extern, typedef, etc) should be first.')
  if Match(r'\s*#\s*endif\s*[^/\s]+', line):
    error(filename, linenum, 'build/endif_comment', 5,
          'Uncommented text after #endif is non-standard. Use a comment.')
  if Match(r'\s*class\s+(\w+\s*::\s*)+\w+\s*;', line):
    error(filename, linenum, 'build/forward_decl', 5,
          'Inner-style forward declarations are invalid. Remove this line.')
  if Search(r'(\w+|[+-]?\d+(\.\d*)?)\s*(<|>)\?=?\s*(\w+|[+-]?\d+)(\.\d*)?',
            line):
    error(filename, linenum, 'build/deprecated', 3,
          '>? and <? (max and min) operators are non-standard and deprecated.')
  if Search(r'^\s*const\s*string\s*&\s*\w+\s*;', line):
    # TODO(unknown): Could it be expanded safely to arbitrary references,
    # without triggering too many false positives? The first
    # attempt triggered 5 warnings for mostly benign code in the regtest, hence
    # the restriction.
    # Here's the original regexp, for the reference:
    # type_name = r'\w+((\s*::\s*\w+)|(\s*<\s*\w+?\s*>))?'
    # r'\s*const\s*' + type_name + '\s*&\s*\w+\s*;'
    error(filename, linenum, 'runtime/member_string_references', 2,
          'const string& members are dangerous. It is much better to use '
          'alternatives, such as pointers or simple constants.')
  # Everything else in this function operates on class declarations.
  # Return early if the top of the nesting stack is not a class, or if
  # the class head is not completed yet.
  classinfo = nesting_state.InnermostClass()
  if not classinfo or not classinfo.seen_open_brace:
    return
  # The class may have been declared with namespace or classname qualifiers.
  # The constructor and destructor will not have those qualifiers.
  base_classname = classinfo.name.split('::')[-1]
  # Look for single-argument constructors that aren't marked explicit.
  # Technically a valid construct, but against style. Also look for
  # non-single-argument constructors which are also technically valid, but
  # strongly suggest something is wrong.
  explicit_constructor_match = Match(
      r'\s+(?:inline\s+)?(explicit\s+)?(?:inline\s+)?%s\s*'
      r'\(((?:[^()]|\([^()]*\))*)\)'
      % re.escape(base_classname),
      line)
  if explicit_constructor_match:
    # group(1) holds the optional "explicit" keyword (or None);
    # group(2) holds the raw text of the argument list.
    is_marked_explicit = explicit_constructor_match.group(1)
    if not explicit_constructor_match.group(2):
      constructor_args = []
    else:
      constructor_args = explicit_constructor_match.group(2).split(',')
    # collapse arguments so that commas in template parameter lists and function
    # argument parameter lists don't split arguments in two
    i = 0
    while i < len(constructor_args):
      constructor_arg = constructor_args[i]
      # Keep merging with the next fragment while brackets are unbalanced,
      # i.e. the comma we split on was inside <...> or (...).
      while (constructor_arg.count('<') > constructor_arg.count('>') or
             constructor_arg.count('(') > constructor_arg.count(')')):
        constructor_arg += ',' + constructor_args[i + 1]
        del constructor_args[i + 1]
      constructor_args[i] = constructor_arg
      i += 1
    defaulted_args = [arg for arg in constructor_args if '=' in arg]
    noarg_constructor = (not constructor_args or # empty arg list
                         # 'void' arg specifier
                         (len(constructor_args) == 1 and
                          constructor_args[0].strip() == 'void'))
    onearg_constructor = ((len(constructor_args) == 1 and # exactly one arg
                           not noarg_constructor) or
                          # all but at most one arg defaulted
                          (len(constructor_args) >= 1 and
                           not noarg_constructor and
                           len(defaulted_args) >= len(constructor_args) - 1))
    initializer_list_constructor = bool(
        onearg_constructor and
        Search(r'\bstd\s*::\s*initializer_list\b', constructor_args[0]))
    copy_constructor = bool(
        onearg_constructor and
        Match(r'(const\s+)?%s(\s*<[^>]*>)?(\s+const)?\s*(?:<\w+>\s*)?&'
              % re.escape(base_classname), constructor_args[0].strip()))
    if (not is_marked_explicit and
        onearg_constructor and
        not initializer_list_constructor and
        not copy_constructor):
      if defaulted_args:
        error(filename, linenum, 'runtime/explicit', 5,
              'Constructors callable with one argument '
              'should be marked explicit.')
      else:
        error(filename, linenum, 'runtime/explicit', 5,
              'Single-parameter constructors should be marked explicit.')
    elif is_marked_explicit and not onearg_constructor:
      if noarg_constructor:
        error(filename, linenum, 'runtime/explicit', 5,
              'Zero-parameter constructors should not be marked explicit.')
      else:
        error(filename, linenum, 'runtime/explicit', 0,
              'Constructors that require multiple arguments '
              'should not be marked explicit.')
def CheckSpacingForFunctionCall(filename, clean_lines, linenum, error):
  """Checks for the correctness of various spacing around function calls.
  Args:
    filename: The name of the current file.
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
    error: The function to call with any errors found.
  """
  # All checks below operate on the elided line (comments/strings removed).
  line = clean_lines.elided[linenum]
  # Since function calls often occur inside if/for/while/switch
  # expressions - which have their own, more liberal conventions - we
  # first see if we should be looking inside such an expression for a
  # function call, to which we can apply more strict standards.
  fncall = line # if there's no control flow construct, look at whole line
  for pattern in (r'\bif\s*\((.*)\)\s*{',
                  r'\bfor\s*\((.*)\)\s*{',
                  r'\bwhile\s*\((.*)\)\s*[{;]',
                  r'\bswitch\s*\((.*)\)\s*{'):
    match = Search(pattern, line)
    if match:
      fncall = match.group(1) # look inside the parens for function calls
      break
  # Except in if/for/while/switch, there should never be space
  # immediately inside parens (eg "f( 3, 4 )"). We make an exception
  # for nested parens ( (a+b) + c ). Likewise, there should never be
  # a space before a ( when it's a function argument. I assume it's a
  # function argument when the char before the whitespace is legal in
  # a function name (alnum + _) and we're not starting a macro. Also ignore
  # pointers and references to arrays and functions coz they're too tricky:
  # we use a very simple way to recognize these:
  # " (something)(maybe-something)" or
  # " (something)(maybe-something," or
  # " (something)[something]"
  # Note that we assume the contents of [] to be short enough that
  # they'll never need to wrap.
  if ( # Ignore control structures.
      not Search(r'\b(if|for|while|switch|return|new|delete|catch|sizeof)\b',
                 fncall) and
      # Ignore pointers/references to functions.
      not Search(r' \([^)]+\)\([^)]*(\)|,$)', fncall) and
      # Ignore pointers/references to arrays.
      not Search(r' \([^)]+\)\[[^\]]+\]', fncall)):
    # The (?!...) lookaheads let a line-continuation backslash (macro
    # definitions) follow the paren without triggering the warning.
    if Search(r'\w\s*\(\s(?!\s*\\$)', fncall): # a ( used for a fn call
      error(filename, linenum, 'whitespace/parens', 4,
            'Extra space after ( in function call')
    elif Search(r'\(\s+(?!(\s*\\)|\()', fncall):
      error(filename, linenum, 'whitespace/parens', 2,
            'Extra space after (')
    if (Search(r'\w\s+\(', fncall) and
        not Search(r'#\s*define|typedef|using\s+\w+\s*=', fncall) and
        not Search(r'\w\s+\((\w+::)*\*\w+\)\(', fncall)):
      # TODO(unknown): Space after an operator function seem to be a common
      # error, silence those for now by restricting them to highest verbosity.
      if Search(r'\boperator_*\b', line):
        error(filename, linenum, 'whitespace/parens', 0,
              'Extra space before ( in function call')
      else:
        error(filename, linenum, 'whitespace/parens', 4,
              'Extra space before ( in function call')
  # If the ) is followed only by a newline or a { + newline, assume it's
  # part of a control statement (if/while/etc), and don't complain
  if Search(r'[^)]\s+\)\s*[^{\s]', fncall):
    # If the closing parenthesis is preceded by only whitespaces,
    # try to give a more descriptive error message.
    if Search(r'^\s+\)', fncall):
      error(filename, linenum, 'whitespace/parens', 2,
            'Closing ) should be moved to the previous line')
    else:
      error(filename, linenum, 'whitespace/parens', 2,
            'Extra space before )')
def IsBlankLine(line):
  """Returns true if the given line is blank.
  We consider a line to be blank if the line is empty or consists of
  only white spaces.
  Args:
    line: A line of a string.
  Returns:
    True, if the given line is blank.
  """
  # An empty string strips to '', and so does a whitespace-only one.
  return line.strip() == ''
def CheckForNamespaceIndentation(filename, nesting_state, clean_lines, line,
                                 error):
  """Flags items that are unexpectedly indented directly inside a namespace."""
  # We are directly inside a namespace body when the previously closed
  # stack entry was a namespace that still sits just below the current top.
  stack = nesting_state.stack
  prev_top = nesting_state.previous_stack_top
  is_namespace_indent_item = (
      len(stack) > 1 and
      stack[-1].check_namespace_indentation and
      isinstance(prev_top, _NamespaceInfo) and
      prev_top == stack[-2])
  if ShouldCheckNamespaceIndentation(nesting_state, is_namespace_indent_item,
                                     clean_lines.elided, line):
    CheckItemIndentationInNamespace(filename, clean_lines.elided,
                                    line, error)
def CheckForFunctionLengths(filename, clean_lines, linenum,
                            function_state, error):
  """Reports for long function bodies.
  For an overview why this is done, see:
  http://google-styleguide.googlecode.com/svn/trunk/cppguide.xml#Write_Short_Functions
  Uses a simplistic algorithm assuming other style guidelines
  (especially spacing) are followed.
  Only checks unindented functions, so class members are unchecked.
  Trivial bodies are unchecked, so constructors with huge initializer lists
  may be missed.
  Blank/comment lines are not counted so as to avoid encouraging the removal
  of vertical space and comments just to get through a lint check.
  NOLINT *on the last line of a function* disables this check.
  Args:
    filename: The name of the current file.
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
    function_state: Current function name and lines in body so far.
    error: The function to call with any errors found.
  """
  lines = clean_lines.lines
  line = lines[linenum]
  # Accumulates the wrapped declaration across lines so TEST parameter
  # lists can be recovered below.
  joined_line = ''
  starting_func = False
  regexp = r'(\w(\w|::|\*|\&|\s)*)\(' # decls * & space::name( ...
  match_result = Match(regexp, line)
  if match_result:
    # If the name is all caps and underscores, figure it's a macro and
    # ignore it, unless it's TEST or TEST_F.
    function_name = match_result.group(1).split()[-1]
    if function_name == 'TEST' or function_name == 'TEST_F' or (
        not Match(r'[A-Z_]+$', function_name)):
      starting_func = True
  if starting_func:
    body_found = False
    # Scan forward for the opening brace (real body) or a terminator
    # (';' or '}') that marks a declaration / trivial function.
    for start_linenum in xrange(linenum, clean_lines.NumLines()):
      start_line = lines[start_linenum]
      joined_line += ' ' + start_line.lstrip()
      if Search(r'(;|})', start_line): # Declarations and trivial functions
        body_found = True
        break # ... ignore
      elif Search(r'{', start_line):
        body_found = True
        function = Search(r'((\w|:)*)\(', line).group(1)
        if Match(r'TEST', function): # Handle TEST... macros
          parameter_regexp = Search(r'(\(.*\))', joined_line)
          if parameter_regexp: # Ignore bad syntax
            function += parameter_regexp.group(1)
          else:
            function += '()'
        function_state.Begin(function)
        break
    if not body_found:
      # No body for the function (or evidence of a non-function) was found.
      error(filename, linenum, 'readability/fn_size', 5,
            'Lint failed to find start of function body.')
  elif Match(r'^\}\s*$', line): # function end
    function_state.Check(error, filename, linenum)
    function_state.End()
  elif not Match(r'^\s*$', line):
    function_state.Count() # Count non-blank/non-comment lines.
# Matches the start of a TODO comment ("// TODO(user):"), capturing the
# whitespace before TODO, the optional "(user)" part, and the character
# (space or end-of-string) that follows the colon.
_RE_PATTERN_TODO = re.compile(r'^//(\s*)TODO(\(.+?\))?:?(\s|$)?')
def CheckComment(line, filename, linenum, next_line_start, error):
  """Checks for common mistakes in comments.
  Args:
    line: The line in question.
    filename: The name of the current file.
    linenum: The number of the line to check.
    next_line_start: The first non-whitespace column of the next line.
    error: The function to call with any errors found.
  """
  # Only C++-style "//" comments are inspected here.
  commentpos = line.find('//')
  if commentpos != -1:
    # Check if the // may be in quotes. If so, ignore it
    # Comparisons made explicit for clarity -- pylint: disable=g-explicit-bool-comparison
    if (line.count('"', 0, commentpos) -
        line.count('\\"', 0, commentpos)) % 2 == 0: # not in quotes
      # Allow one space for new scopes, two spaces otherwise:
      if (not (Match(r'^.*{ *//', line) and next_line_start == commentpos) and
          ((commentpos >= 1 and
            line[commentpos-1] not in string.whitespace) or
           (commentpos >= 2 and
            line[commentpos-2] not in string.whitespace))):
        error(filename, linenum, 'whitespace/comments', 2,
              'At least two spaces is best between code and comments')
      # Checks for common mistakes in TODO comments.
      comment = line[commentpos:]
      match = _RE_PATTERN_TODO.match(comment)
      if match:
        # One whitespace is correct; zero whitespace is handled elsewhere.
        leading_whitespace = match.group(1)
        if len(leading_whitespace) > 1:
          error(filename, linenum, 'whitespace/todo', 2,
                'Too many spaces before TODO')
        username = match.group(2)
        if not username:
          error(filename, linenum, 'readability/todo', 2,
                'Missing username in TODO; it should look like '
                '"// TODO(my_username): Stuff."')
        middle_whitespace = match.group(3)
        # Comparisons made explicit for correctness -- pylint: disable=g-explicit-bool-comparison
        if middle_whitespace != ' ' and middle_whitespace != '':
          error(filename, linenum, 'whitespace/todo', 2,
                'TODO(my_username) should be followed by a space')
      # If the comment contains an alphanumeric character, there
      # should be a space somewhere between it and the //.
      if Match(r'//[^ ]*\w', comment):
        error(filename, linenum, 'whitespace/comments', 4,
              'Should have a space between // and comment')
def CheckAccess(filename, clean_lines, linenum, nesting_state, error):
  """Checks for improper use of DISALLOW* macros.
  Args:
    filename: The name of the current file.
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
    nesting_state: A NestingState instance which maintains information about
      the current stack of nested blocks being parsed.
    error: The function to call with any errors found.
  """
  line = clean_lines.elided[linenum] # get rid of comments and strings
  matched = Match(r'\s*(DISALLOW_COPY_AND_ASSIGN|'
                  r'DISALLOW_IMPLICIT_CONSTRUCTORS)', line)
  if not matched:
    return
  # The macro only makes sense inside a class declaration. When it shows
  # up elsewhere (e.g. inside a function body) the compiler has most
  # likely complained already, so we stay silent in that case.
  if not (nesting_state.stack and
          isinstance(nesting_state.stack[-1], _ClassInfo)):
    return
  if nesting_state.stack[-1].access != 'private':
    error(filename, linenum, 'readability/constructors', 3,
          '%s must be in the private: section' % matched.group(1))
def CheckSpacing(filename, clean_lines, linenum, nesting_state, error):
  """Checks for the correctness of various spacing issues in the code.
  Things we check for: spaces around operators, spaces after
  if/for/while/switch, no spaces around parens in function calls, two
  spaces between code and comment, don't start a block with a blank
  line, don't end a function with a blank line, don't add a blank line
  after public/protected/private, don't have too many blank lines in a row.
  Args:
    filename: The name of the current file.
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
    nesting_state: A NestingState instance which maintains information about
      the current stack of nested blocks being parsed.
    error: The function to call with any errors found.
  """
  # Don't use "elided" lines here, otherwise we can't check commented lines.
  # Don't want to use "raw" either, because we don't want to check inside C++11
  # raw strings,
  raw = clean_lines.lines_without_raw_strings
  line = raw[linenum]
  # Before nixing comments, check if the line is blank for no good
  # reason. This includes the first line after a block is opened, and
  # blank lines at the end of a function (ie, right before a line like '}'
  #
  # Skip all the blank line checks if we are immediately inside a
  # namespace body. In other words, don't issue blank line warnings
  # for this block:
  # namespace {
  #
  # }
  #
  # A warning about missing end of namespace comments will be issued instead.
  #
  # Also skip blank line checks for 'extern "C"' blocks, which are formatted
  # like namespaces.
  if (IsBlankLine(line) and
      not nesting_state.InNamespaceBody() and
      not nesting_state.InExternC()):
    elided = clean_lines.elided
    prev_line = elided[linenum - 1]
    prevbrace = prev_line.rfind('{')
    # TODO(unknown): Don't complain if line before blank line, and line after,
    # both start with alnums and are indented the same amount.
    # This ignores whitespace at the start of a namespace block
    # because those are not usually indented.
    if prevbrace != -1 and prev_line[prevbrace:].find('}') == -1:
      # OK, we have a blank line at the start of a code block. Before we
      # complain, we check if it is an exception to the rule: The previous
      # non-empty line has the parameters of a function header that are indented
      # 4 spaces (because they did not fit in a 80 column line when placed on
      # the same line as the function name). We also check for the case where
      # the previous line is indented 6 spaces, which may happen when the
      # initializers of a constructor do not fit into a 80 column line.
      exception = False
      if Match(r' {6}\w', prev_line): # Initializer list?
        # We are looking for the opening column of initializer list, which
        # should be indented 4 spaces to cause 6 space indentation afterwards.
        search_position = linenum-2
        while (search_position >= 0
               and Match(r' {6}\w', elided[search_position])):
          search_position -= 1
        exception = (search_position >= 0
                     and elided[search_position][:5] == ' :')
      else:
        # Search for the function arguments or an initializer list. We use a
        # simple heuristic here: If the line is indented 4 spaces; and we have a
        # closing paren, without the opening paren, followed by an opening brace
        # or colon (for initializer lists) we assume that it is the last line of
        # a function header. If we have a colon indented 4 spaces, it is an
        # initializer list.
        exception = (Match(r' {4}\w[^\(]*\)\s*(const\s*)?(\{\s*$|:)',
                           prev_line)
                     or Match(r' {4}:', prev_line))
      if not exception:
        error(filename, linenum, 'whitespace/blank_line', 2,
              'Redundant blank line at the start of a code block '
              'should be deleted.')
    # Ignore blank lines at the end of a block in a long if-else
    # chain, like this:
    # if (condition1) {
    # // Something followed by a blank line
    #
    # } else if (condition2) {
    # // Something else
    # }
    if linenum + 1 < clean_lines.NumLines():
      next_line = raw[linenum + 1]
      if (next_line
          and Match(r'\s*}', next_line)
          and next_line.find('} else ') == -1):
        error(filename, linenum, 'whitespace/blank_line', 3,
              'Redundant blank line at the end of a code block '
              'should be deleted.')
    # A blank line immediately after an access specifier is also flagged.
    matched = Match(r'\s*(public|protected|private):', prev_line)
    if matched:
      error(filename, linenum, 'whitespace/blank_line', 3,
            'Do not leave a blank line after "%s:"' % matched.group(1))
  # Next, check comments
  next_line_start = 0
  if linenum + 1 < clean_lines.NumLines():
    next_line = raw[linenum + 1]
    next_line_start = len(next_line) - len(next_line.lstrip())
  CheckComment(line, filename, linenum, next_line_start, error)
  # get rid of comments and strings
  line = clean_lines.elided[linenum]
  # You shouldn't have spaces before your brackets, except maybe after
  # 'delete []' or 'return []() {};'
  if Search(r'\w\s+\[', line) and not Search(r'(?:delete|return)\s+\[', line):
    error(filename, linenum, 'whitespace/braces', 5,
          'Extra space before [')
  # In range-based for, we wanted spaces before and after the colon, but
  # not around "::" tokens that might appear.
  if (Search(r'for *\(.*[^:]:[^: ]', line) or
      Search(r'for *\(.*[^: ]:[^:]', line)):
    error(filename, linenum, 'whitespace/forcolon', 2,
          'Missing space around colon in range-based for loop')
def CheckOperatorSpacing(filename, clean_lines, linenum, error):
  """Checks for horizontal spacing around operators.
  Args:
    filename: The name of the current file.
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
    error: The function to call with any errors found.
  """
  # NOTE: 'line' is progressively rewritten below (operator names are
  # masked out); every later check sees the masked text.
  line = clean_lines.elided[linenum]
  # Don't try to do spacing checks for operator methods. Do this by
  # replacing the troublesome characters with something else,
  # preserving column position for all other characters.
  #
  # The replacement is done repeatedly to avoid false positives from
  # operators that call operators.
  while True:
    match = Match(r'^(.*\boperator\b)(\S+)(\s*\(.*)$', line)
    if match:
      line = match.group(1) + ('_' * len(match.group(2))) + match.group(3)
    else:
      break
  # We allow no-spaces around = within an if: "if ( (a=Foo()) == 0 )".
  # Otherwise not. Note we only check for non-spaces on *both* sides;
  # sometimes people put non-spaces on one side when aligning ='s among
  # many lines (not that this is behavior that I approve of...)
  if Search(r'[\w.]=[\w.]', line) and not Search(r'\b(if|while) ', line):
    error(filename, linenum, 'whitespace/operators', 4,
          'Missing spaces around =')
  # It's ok not to have spaces around binary operators like + - * /, but if
  # there's too little whitespace, we get concerned. It's hard to tell,
  # though, so we punt on this one for now. TODO.
  # You should always have whitespace around binary operators.
  #
  # Check <= and >= first to avoid false positives with < and >, then
  # check non-include lines for spacing around < and >.
  #
  # If the operator is followed by a comma, assume it's be used in a
  # macro context and don't do any checks. This avoids false
  # positives.
  #
  # Note that && is not included here. Those are checked separately
  # in CheckRValueReference
  match = Search(r'[^<>=!\s](==|!=|<=|>=|\|\|)[^<>=!\s,;\)]', line)
  if match:
    error(filename, linenum, 'whitespace/operators', 3,
          'Missing spaces around %s' % match.group(1))
  elif not Match(r'#.*include', line):
    # Look for < that is not surrounded by spaces. This is only
    # triggered if both sides are missing spaces, even though
    # technically should should flag if at least one side is missing a
    # space. This is done to avoid some false positives with shifts.
    match = Match(r'^(.*[^\s<])<[^\s=<,]', line)
    if match:
      (_, _, end_pos) = CloseExpression(
          clean_lines, linenum, len(match.group(1)))
      if end_pos <= -1:
        error(filename, linenum, 'whitespace/operators', 3,
              'Missing spaces around <')
    # Look for > that is not surrounded by spaces. Similar to the
    # above, we only trigger if both sides are missing spaces to avoid
    # false positives with shifts.
    match = Match(r'^(.*[^-\s>])>[^\s=>,]', line)
    if match:
      (_, _, start_pos) = ReverseCloseExpression(
          clean_lines, linenum, len(match.group(1)))
      if start_pos <= -1:
        error(filename, linenum, 'whitespace/operators', 3,
              'Missing spaces around >')
  # We allow no-spaces around << when used like this: 10<<20, but
  # not otherwise (particularly, not when used as streams)
  #
  # We also allow operators following an opening parenthesis, since
  # those tend to be macros that deal with operators.
  match = Search(r'(operator|\S)(?:L|UL|ULL|l|ul|ull)?<<([^\s,=])', line)
  if (match and match.group(1) != '(' and
      not (match.group(1).isdigit() and match.group(2).isdigit()) and
      not (match.group(1) == 'operator' and match.group(2) == ';')):
    error(filename, linenum, 'whitespace/operators', 3,
          'Missing spaces around <<')
  # We allow no-spaces around >> for almost anything. This is because
  # C++11 allows ">>" to close nested templates, which accounts for
  # most cases when ">>" is not followed by a space.
  #
  # We still warn on ">>" followed by alpha character, because that is
  # likely due to ">>" being used for right shifts, e.g.:
  # value >> alpha
  #
  # When ">>" is used to close templates, the alphanumeric letter that
  # follows would be part of an identifier, and there should still be
  # a space separating the template type and the identifier.
  # type<type<type>> alpha
  match = Search(r'>>[a-zA-Z_]', line)
  if match:
    error(filename, linenum, 'whitespace/operators', 3,
          'Missing spaces around >>')
  # There shouldn't be space around unary operators
  match = Search(r'(!\s|~\s|[\s]--[\s;]|[\s]\+\+[\s;])', line)
  if match:
    error(filename, linenum, 'whitespace/operators', 4,
          'Extra space for operator %s' % match.group(1))
def CheckParenthesisSpacing(filename, clean_lines, linenum, error):
  """Checks for horizontal spacing around parentheses.
  Args:
    filename: The name of the current file.
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
    error: The function to call with any errors found.
  """
  line = clean_lines.elided[linenum]
  # A space is required between a control keyword and its opening paren.
  keyword_match = Search(r' (if\(|for\(|while\(|switch\()', line)
  if keyword_match:
    error(filename, linenum, 'whitespace/parens', 5,
          'Missing space before ( in %s' % keyword_match.group(1))
  # Inside the parens of if/for/while/switch there must be zero or one
  # space, and both sides must use the same amount. "for ( ; foo; bar)"
  # and "for (foo; bar; )" are tolerated as special cases.
  paren_match = Search(r'\b(if|for|while|switch)\s*'
                       r'\(([ ]*)(.).*[^ ]+([ ]*)\)\s*{\s*$',
                       line)
  if not paren_match:
    return
  left_spaces = paren_match.group(2)
  right_spaces = paren_match.group(4)
  if len(left_spaces) != len(right_spaces):
    # Allow the "for ( ; ...)" / "for (...; )" empty-clause exceptions.
    is_for_exception = ((paren_match.group(3) == ';' and
                         len(left_spaces) == 1 + len(right_spaces)) or
                        (not left_spaces and
                         Search(r'\bfor\s*\(.*; \)', line)))
    if not is_for_exception:
      error(filename, linenum, 'whitespace/parens', 5,
            'Mismatching spaces inside () in %s' % paren_match.group(1))
  if len(left_spaces) not in [0, 1]:
    error(filename, linenum, 'whitespace/parens', 5,
          'Should have zero or one spaces inside ( and ) in %s' %
          paren_match.group(1))
def CheckCommaSpacing(filename, clean_lines, linenum, error):
  """Checks for horizontal spacing near commas and semicolons.
  Args:
    filename: The name of the current file.
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
    error: The function to call with any errors found.
  """
  raw = clean_lines.lines_without_raw_strings
  line = clean_lines.elided[linenum]
  # A comma should be followed by a space unless the next character is
  # another comma (only happens with empty macro arguments). We first
  # mask "operator,(" definitions so they do not trigger the check, then
  # require the gap on both the elided and the raw line: the raw pass
  # rules out gaps that exist only because a comment was elided.
  masked = ReplaceAll(r'\boperator\s*,\s*\(', 'F(', line)
  if Search(r',[^,\s]', masked) and Search(r',[^,\s]', raw[linenum]):
    error(filename, linenum, 'whitespace/comma', 3,
          'Missing space after ,')
  # A semicolon should also be followed by a space, barring a few corner
  # cases such as "};" or a semicolon at end of line.
  # TODO(unknown): clarify if 'if (1) { return 1;}' is requires one more
  # space after ;
  if Search(r';[^\s};\\)/]', line):
    error(filename, linenum, 'whitespace/semicolon', 3,
          'Missing space after ;')
def CheckBracesSpacing(filename, clean_lines, linenum, error):
  """Checks for horizontal spacing near braces and trailing semicolons.
  Args:
    filename: The name of the current file.
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
    error: The function to call with any errors found.
  """
  line = clean_lines.elided[linenum]
  # Except after an opening paren, or after another opening brace (in case of
  # an initializer list, for instance), you should have spaces before your
  # braces. And since you should never have braces at the beginning of a line,
  # this is an easy test.
  match = Match(r'^(.*[^ ({]){', line)
  if match:
    # Try a bit harder to check for brace initialization. This
    # happens in one of the following forms:
    # Constructor() : initializer_list_{} { ... }
    # Constructor{}.MemberFunction()
    # Type variable{};
    # FunctionCall(type{}, ...);
    # LastArgument(..., type{});
    # LOG(INFO) << type{} << " ...";
    # map_of_type[{...}] = ...;
    # ternary = expr ? new type{} : nullptr;
    # OuterTemplate<InnerTemplateConstructor<Type>{}>
    #
    # We check for the character following the closing brace, and
    # silence the warning if it's one of those listed above, i.e.
    # "{.;,)<>]:".
    #
    # To account for nested initializer list, we allow any number of
    # closing braces up to "{;,)<". We can't simply silence the
    # warning on first sight of closing brace, because that would
    # cause false negatives for things that are not initializer lists.
    # Silence this: But not this:
    # Outer{ if (...) {
    # Inner{...} if (...){ // Missing space before {
    # }; }
    #
    # There is a false negative with this approach if people inserted
    # spurious semicolons, e.g. "if (cond){};", but we will catch the
    # spurious semicolon with a separate check.
    (endline, endlinenum, endpos) = CloseExpression(
        clean_lines, linenum, len(match.group(1)))
    trailing_text = ''
    if endpos > -1:
      trailing_text = endline[endpos:]
    # Also look at up to two following lines for the trailing character.
    for offset in xrange(endlinenum + 1,
                         min(endlinenum + 3, clean_lines.NumLines() - 1)):
      trailing_text += clean_lines.elided[offset]
    if not Match(r'^[\s}]*[{.;,)<>\]:]', trailing_text):
      error(filename, linenum, 'whitespace/braces', 5,
            'Missing space before {')
  # Make sure '} else {' has spaces.
  if Search(r'}else', line):
    error(filename, linenum, 'whitespace/braces', 5,
          'Missing space before else')
  # You shouldn't have a space before a semicolon at the end of the line.
  # There's a special case for "for" since the style guide allows space before
  # the semicolon there.
  if Search(r':\s*;\s*$', line):
    error(filename, linenum, 'whitespace/semicolon', 5,
          'Semicolon defining empty statement. Use {} instead.')
  elif Search(r'^\s*;\s*$', line):
    error(filename, linenum, 'whitespace/semicolon', 5,
          'Line contains only semicolon. If this should be an empty statement, '
          'use {} instead.')
  elif (Search(r'\s+;\s*$', line) and
        not Search(r'\bfor\b', line)):
    error(filename, linenum, 'whitespace/semicolon', 5,
          'Extra space before last semicolon. If this should be an empty '
          'statement, use {} instead.')
def IsDecltype(clean_lines, linenum, column):
  """Check if the token ending on (linenum, column) is decltype().
  Args:
    clean_lines: A CleansedLines instance containing the file.
    linenum: the number of the line to check.
    column: end column of the token to check.
  Returns:
    True if this token is decltype() expression, False otherwise.
  """
  (text, _, start_col) = ReverseCloseExpression(clean_lines, linenum, column)
  if start_col < 0:
    return False
  # The parenthesized expression is decltype() exactly when the word just
  # before the opening paren is "decltype".
  return bool(Search(r'\bdecltype\s*$', text[0:start_col]))
def IsTemplateParameterList(clean_lines, linenum, column):
  """Determine whether the token at (linenum, column) ends a template<> list.

  Args:
    clean_lines: A CleansedLines instance containing the file.
    linenum: the number of the line to check.
    column: end column of the token to check.
  Returns:
    True if this token is end of a template parameter list, False otherwise.
  """
  # Find the matching opening bracket, then verify the keyword "template"
  # is the last thing before it.
  _, open_line, open_pos = ReverseCloseExpression(clean_lines, linenum, column)
  if open_pos < 0:
    return False
  return bool(Search(r'\btemplate\s*$',
                     clean_lines.elided[open_line][0:open_pos]))
def IsRValueType(clean_lines, nesting_state, linenum, column):
  """Check if the token ending on (linenum, column) is a type.

  Assumes that text to the right of the column is "&&" or a function
  name.

  Args:
    clean_lines: A CleansedLines instance containing the file.
    nesting_state: A NestingState instance which maintains information about
      the current stack of nested blocks being parsed.
    linenum: the number of the line to check.
    column: end column of the token to check.
  Returns:
    True if this token is a type, False if we are not sure.
  """
  prefix = clean_lines.elided[linenum][0:column]
  # Get one word to the left.  If we failed to do so, this is most
  # likely not a type, since it's unlikely that the type name and "&&"
  # would be split across multiple lines.
  match = Match(r'^(.*)(\b\w+|[>*)&])\s*$', prefix)
  if not match:
    return False
  # Check text following the token.  If it's "&&>" or "&&," or "&&...", it's
  # most likely a rvalue reference used inside a template.
  suffix = clean_lines.elided[linenum][column:]
  if Match(r'&&\s*(?:[>,]|\.\.\.)', suffix):
    return True
  # Check for simple type and end of templates:
  #   int&& variable
  #   vector<int>&& variable
  #
  # Because this function is called recursively, we also need to
  # recognize pointer and reference types:
  #   int* Function()
  #   int& Function()
  if match.group(2) in ['char', 'char16_t', 'char32_t', 'wchar_t', 'bool',
                        'short', 'int', 'long', 'signed', 'unsigned',
                        'float', 'double', 'void', 'auto', '>', '*', '&']:
    return True
  # If we see a close parenthesis, look for decltype on the other side.
  # decltype would unambiguously identify a type, anything else is
  # probably a parenthesized expression and not a type.
  if match.group(2) == ')':
    return IsDecltype(
        clean_lines, linenum, len(match.group(1)) + len(match.group(2)) - 1)
  # Check for casts and cv-qualifiers.
  #   match.group(1)  remainder
  #   --------------  ---------
  #   const_cast<     type&&
  #   const           type&&
  #   type            const&&
  if Search(r'\b(?:const_cast\s*<|static_cast\s*<|dynamic_cast\s*<|'
            r'reinterpret_cast\s*<|\w+\s)\s*$',
            match.group(1)):
    return True
  # Look for a preceding symbol that might help differentiate the context.
  # These are the cases that would be ambiguous:
  #   match.group(1)  remainder
  #   --------------  ---------
  #   Call         (   expression &&
  #   Declaration  (   type&&
  #   sizeof       (   type&&
  #   if           (   expression &&
  #   while        (   expression &&
  #   for          (   type&&
  #   for(         ;   expression &&
  #   statement    ;   type&&
  #   block        {   type&&
  #   constructor  {   expression &&
  start = linenum
  line = match.group(1)
  match_symbol = None
  # Scan backwards, possibly across lines, for the nearest symbol that is
  # neither part of an identifier nor a comma; it disambiguates the cases
  # tabulated above.
  while start >= 0:
    # We want to skip over identifiers and commas to get to a symbol.
    # Commas are skipped so that we can find the opening parenthesis
    # for function parameter lists.
    match_symbol = Match(r'^(.*)([^\w\s,])[\w\s,]*$', line)
    if match_symbol:
      break
    start -= 1
    line = clean_lines.elided[start]
  if not match_symbol:
    # Probably the first statement in the file is an rvalue reference
    return True
  if match_symbol.group(2) == '}':
    # Found closing brace, probably an indicate of this:
    #   block{} type&&
    return True
  if match_symbol.group(2) == ';':
    # Found semicolon, probably one of these:
    #   for(; expression &&
    #   statement; type&&
    # Look for the previous 'for(' in the previous lines.
    before_text = match_symbol.group(1)
    # Only look back up to 5 lines for the enclosing "for(".
    for i in xrange(start - 1, max(start - 6, 0), -1):
      before_text = clean_lines.elided[i] + before_text
    if Search(r'for\s*\([^{};]*$', before_text):
      # This is the condition inside a for-loop
      return False
    # Did not find a for-init-statement before this semicolon, so this
    # is probably a new statement and not a condition.
    return True
  if match_symbol.group(2) == '{':
    # Found opening brace, probably one of these:
    #   block{ type&& = ... ; }
    #   constructor{ expression && expression }
    # Look for a closing brace or a semicolon.  If we see a semicolon
    # first, this is probably a rvalue reference.
    line = clean_lines.elided[start][0:len(match_symbol.group(1)) + 1]
    end = start
    depth = 1
    # Forward scan tracking brace nesting until the matching '}' closes
    # (expression context) or a ';' appears first (declaration context).
    while True:
      for ch in line:
        if ch == ';':
          return True
        elif ch == '{':
          depth += 1
        elif ch == '}':
          depth -= 1
          if depth == 0:
            return False
      end += 1
      if end >= clean_lines.NumLines():
        break
      line = clean_lines.elided[end]
    # Incomplete program?
    return False
  if match_symbol.group(2) == '(':
    # Opening parenthesis.  Need to check what's to the left of the
    # parenthesis.  Look back one extra line for additional context.
    before_text = match_symbol.group(1)
    if linenum > 1:
      before_text = clean_lines.elided[linenum - 1] + before_text
    # NOTE(review): the reassignment below discards the previous-line
    # context that was just prepended, making the two lines above dead
    # code.  This looks unintentional -- confirm against upstream cpplint
    # before changing, since removing it would alter lint results.
    before_text = match_symbol.group(1)
    # Patterns that are likely to be types:
    #   [](type&&
    #   for (type&&
    #   sizeof(type&&
    #   operator=(type&&
    #
    if Search(r'(?:\]|\bfor|\bsizeof|\boperator\s*\S+\s*)\s*$', before_text):
      return True
    # Patterns that are likely to be expressions:
    #   if (expression &&
    #   while (expression &&
    #   : initializer(expression &&
    #   , initializer(expression &&
    #   ( FunctionCall(expression &&
    #   + FunctionCall(expression &&
    #   + (expression &&
    #
    # The last '+' represents operators such as '+' and '-'.
    if Search(r'(?:\bif|\bwhile|[-+=%^(<!?:,&*]\s*)$', before_text):
      return False
    # Something else.  Check that tokens to the left look like
    #   return_type function_name
    match_func = Match(r'^(.*)\s+\w(?:\w|::)*(?:<[^<>]*>)?\s*$',
                       match_symbol.group(1))
    if match_func:
      # Check for constructors, which don't have return types.
      if Search(r'\b(?:explicit|inline)$', match_func.group(1)):
        return True
      implicit_constructor = Match(r'\s*(\w+)\((?:const\s+)?(\w+)', prefix)
      if (implicit_constructor and
          implicit_constructor.group(1) == implicit_constructor.group(2)):
        return True
      # Recurse on what precedes the presumed function name.
      return IsRValueType(clean_lines, nesting_state, linenum,
                          len(match_func.group(1)))
    # Nothing before the function name.  If this is inside a block scope,
    # this is probably a function call.
    return not (nesting_state.previous_stack_top and
                nesting_state.previous_stack_top.IsBlockInfo())
  if match_symbol.group(2) == '>':
    # Possibly a closing bracket, check that what's on the other side
    # looks like the start of a template.
    return IsTemplateParameterList(
        clean_lines, start, len(match_symbol.group(1)))
  # Some other symbol, usually something like "a=b&&c".  This is most
  # likely not a type.
  return False
def IsDeletedOrDefault(clean_lines, linenum):
  """Check if current constructor or operator is deleted or default.

  Args:
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
  Returns:
    Truthy if this is a deleted or default constructor.
  """
  # Locate the argument list, jump to its closing parenthesis, and see
  # whether "= delete" or "= default" follows it.
  paren_start = clean_lines.elided[linenum].find('(')
  if paren_start < 0:
    return False
  close_line, _, paren_end = CloseExpression(clean_lines, linenum, paren_start)
  if paren_end < 0:
    return False
  return Match(r'\s*=\s*(?:delete|default)\b', close_line[paren_end:])
def IsRValueAllowed(clean_lines, linenum):
  """Check if RValue reference is allowed on a particular line.

  Args:
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
  Returns:
    True if line is within the region where RValue references are allowed.
  """
  # Regions bracketed by GOOGLE_ALLOW_RVALUE_REFERENCES_PUSH/POP are exempt.
  # Scan backwards for the nearest marker; if it is a PUSH, scan forwards
  # to confirm the region is terminated by a POP.
  for back in xrange(linenum, 0, -1):
    marker = clean_lines.elided[back]
    if Match(r'GOOGLE_ALLOW_RVALUE_REFERENCES_(?:PUSH|POP)', marker):
      if not marker.endswith('PUSH'):
        return False
      for fwd in xrange(linenum, clean_lines.NumLines(), 1):
        marker = clean_lines.elided[fwd]
        if Match(r'GOOGLE_ALLOW_RVALUE_REFERENCES_(?:PUSH|POP)', marker):
          return marker.endswith('POP')
  current = clean_lines.elided[linenum]
  # Deleted/defaulted assignment operators are allowed.
  if Search(r'\boperator\s*=\s*\(', current):
    return IsDeletedOrDefault(clean_lines, linenum)
  # Deleted/defaulted constructors are allowed: either an out-of-line
  # Foo::Foo definition, an explicit/inline declaration, or a bare
  # constructor-looking declaration right after a scope boundary.
  ctor = Match(r'\s*([\w<>]+)\s*::\s*([\w<>]+)\s*\(', current)
  if ctor and ctor.group(1) == ctor.group(2):
    return IsDeletedOrDefault(clean_lines, linenum)
  if Search(r'\b(?:explicit|inline)\s+[\w<>]+\s*\(', current):
    return IsDeletedOrDefault(clean_lines, linenum)
  if Match(r'\s*[\w<>]+\s*\(', current):
    preceding = clean_lines.elided[linenum - 1] if linenum > 0 else 'ReturnType'
    if Match(r'^\s*$', preceding) or Search(r'[{}:;]\s*$', preceding):
      return IsDeletedOrDefault(clean_lines, linenum)
  return False
def CheckRValueReference(filename, clean_lines, linenum, nesting_state, error):
  """Check for rvalue references.

  Args:
    filename: The name of the current file.
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
    nesting_state: A NestingState instance which maintains information about
      the current stack of nested blocks being parsed.
    error: The function to call with any errors found.
  """
  # Find lines missing spaces around &&.
  # TODO(unknown): currently we don't check for rvalue references
  # with spaces surrounding the && to avoid false positives with
  # boolean expressions.
  text = clean_lines.elided[linenum]
  # Either no space before && or no space after it.
  found = Match(r'^(.*\S)&&', text) or Match(r'(.*)&&\S', text)
  if not found or '(&&)' in text or Search(r'\boperator\s*$', found.group(1)):
    return
  # Either poorly formed && or an rvalue reference, check the context
  # to get a more accurate error message.  Mostly we want to determine
  # if what's to the left of "&&" is a type or not.
  amp_pos = len(found.group(1))
  if IsRValueType(clean_lines, nesting_state, linenum, amp_pos):
    if IsRValueAllowed(clean_lines, linenum):
      return
    error(filename, linenum, 'build/c++11', 3,
          'RValue references are an unapproved C++ feature.')
  else:
    error(filename, linenum, 'whitespace/operators', 3,
          'Missing spaces around &&')
def CheckSectionSpacing(filename, clean_lines, class_info, linenum, error):
  """Checks for additional blank line issues related to sections.

  Currently the only thing checked here is blank line before
  protected/private.

  Args:
    filename: The name of the current file.
    clean_lines: A CleansedLines instance containing the file.
    class_info: A _ClassInfo objects.
    linenum: The number of the line to check.
    error: The function to call with any errors found.
  """
  # Skip checks if the class is small, where small means 25 lines or less.
  # 25 lines seems like a good cutoff since that's the usual height of
  # terminals, and any class that can't fit in one screen can't really
  # be considered "small".
  #
  # Also skip checks if we are on the first line.  This accounts for
  # classes that look like
  #   class Foo { public: ... };
  #
  # If we didn't find the end of the class, last_line would be zero,
  # and the check will be skipped by the first condition.
  if (class_info.last_line - class_info.starting_linenum <= 24 or
      linenum <= class_info.starting_linenum):
    return
  matched = Match(r'\s*(public|protected|private):', clean_lines.lines[linenum])
  if not matched:
    return
  # Issue warning if the line before public/protected/private was
  # not a blank line, but don't do this if the previous line contains
  # "class" or "struct".  This can happen two ways:
  #  - We are at the beginning of the class.
  #  - We are forward-declaring an inner class that is semantically
  #    private, but needed to be public for implementation reasons.
  # Also ignores cases where the previous line ends with a backslash as can be
  # common when defining classes in C macros.
  prev_line = clean_lines.lines[linenum - 1]
  if (IsBlankLine(prev_line) or
      Search(r'\b(class|struct)\b', prev_line) or
      Search(r'\\$', prev_line)):
    return
  # Try a bit harder to find the beginning of the class.  This is to
  # account for multi-line base-specifier lists, e.g.:
  #   class Derived
  #       : public Base {
  end_class_head = class_info.starting_linenum
  for i in range(class_info.starting_linenum, linenum):
    if Search(r'\{\s*$', clean_lines.lines[i]):
      end_class_head = i
      break
  if end_class_head < linenum - 1:
    error(filename, linenum, 'whitespace/blank_line', 3,
          '"%s:" should be preceded by a blank line' % matched.group(1))
def GetPreviousNonBlankLine(clean_lines, linenum):
  """Return the most recent non-blank line and its line number.

  Args:
    clean_lines: A CleansedLines instance containing the file contents.
    linenum: The number of the line to check.
  Returns:
    A tuple with two elements.  The first element is the contents of the last
    non-blank line before the current line, or the empty string if this is the
    first non-blank line.  The second is the line number of that line, or -1
    if this is the first non-blank line.
  """
  # Walk upwards until we hit a line with content, or run off the top.
  for prev_num in xrange(linenum - 1, -1, -1):
    candidate = clean_lines.elided[prev_num]
    if not IsBlankLine(candidate):
      return (candidate, prev_num)
  return ('', -1)
def CheckBraces(filename, clean_lines, linenum, error):
  """Looks for misplaced braces (e.g. at the end of line).

  Args:
    filename: The name of the current file.
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
    error: The function to call with any errors found.
  """
  line = clean_lines.elided[linenum]        # get rid of comments and strings
  if Match(r'\s*{\s*$', line):
    # We allow an open brace to start a line in the case where someone is using
    # braces in a block to explicitly create a new scope, which is commonly used
    # to control the lifetime of stack-allocated variables.  Braces are also
    # used for brace initializers inside function calls.  We don't detect this
    # perfectly: we just don't complain if the last non-whitespace character on
    # the previous non-blank line is ',', ';', ':', '(', '{', or '}', or if the
    # previous line starts a preprocessor block.
    prevline = GetPreviousNonBlankLine(clean_lines, linenum)[0]
    if (not Search(r'[,;:}{(]\s*$', prevline) and
        not Match(r'\s*#', prevline)):
      error(filename, linenum, 'whitespace/braces', 4,
            '{ should almost always be at the end of the previous line')
  # An else clause should be on the same line as the preceding closing brace.
  if Match(r'\s*else\b\s*(?:if\b|\{|$)', line):
    prevline = GetPreviousNonBlankLine(clean_lines, linenum)[0]
    if Match(r'\s*}\s*$', prevline):
      error(filename, linenum, 'whitespace/newline', 4,
            'An else should appear on the same line as the preceding }')
  # If braces come on one side of an else, they should be on both.
  # However, we have to worry about "else if" that spans multiple lines!
  if Search(r'else if\s*\(', line):       # could be multi-line if
    brace_on_left = bool(Search(r'}\s*else if\s*\(', line))
    # find the ( after the if
    pos = line.find('else if')
    pos = line.find('(', pos)
    if pos > 0:
      # Follow the condition to its closing ')' (may span lines) and see
      # whether a '{' comes after it.
      (endline, _, endpos) = CloseExpression(clean_lines, linenum, pos)
      brace_on_right = endline[endpos:].find('{') != -1
      if brace_on_left != brace_on_right:    # must be brace after if
        error(filename, linenum, 'readability/braces', 5,
              'If an else has a brace on one side, it should have it on both')
  elif Search(r'}\s*else[^{]*$', line) or Match(r'[^}]*else\s*{', line):
    error(filename, linenum, 'readability/braces', 5,
          'If an else has a brace on one side, it should have it on both')
  # Likewise, an else should never have the else clause on the same line
  if Search(r'\belse [^\s{]', line) and not Search(r'\belse if\b', line):
    error(filename, linenum, 'whitespace/newline', 4,
          'Else clause should never be on same line as else (use 2 lines)')
  # In the same way, a do/while should never be on one line
  if Match(r'\s*do [^\s{]', line):
    error(filename, linenum, 'whitespace/newline', 4,
          'do/while clauses should not be on a single line')
  # Check single-line if/else bodies. The style guide says 'curly braces are not
  # required for single-line statements'. We additionally allow multi-line,
  # single statements, but we reject anything with more than one semicolon in
  # it. This means that the first semicolon after the if should be at the end of
  # its line, and the line after that should have an indent level equal to or
  # lower than the if. We also check for ambiguous if/else nesting without
  # braces.
  if_else_match = Search(r'\b(if\s*\(|else\b)', line)
  # Skip preprocessor lines (e.g. "#if") which would otherwise match.
  if if_else_match and not Match(r'\s*#', line):
    if_indent = GetIndentLevel(line)
    endline, endlinenum, endpos = line, linenum, if_else_match.end()
    if_match = Search(r'\bif\s*\(', line)
    if if_match:
      # This could be a multiline if condition, so find the end first.
      pos = if_match.end() - 1
      (endline, endlinenum, endpos) = CloseExpression(clean_lines, linenum, pos)
    # Check for an opening brace, either directly after the if or on the next
    # line. If found, this isn't a single-statement conditional.
    if (not Match(r'\s*{', endline[endpos:])
        and not (Match(r'\s*$', endline[endpos:])
                 and endlinenum < (len(clean_lines.elided) - 1)
                 and Match(r'\s*{', clean_lines.elided[endlinenum + 1]))):
      # Advance to the line containing the first semicolon of the body.
      while (endlinenum < len(clean_lines.elided)
             and ';' not in clean_lines.elided[endlinenum][endpos:]):
        endlinenum += 1
        endpos = 0
      if endlinenum < len(clean_lines.elided):
        endline = clean_lines.elided[endlinenum]
        # We allow a mix of whitespace and closing braces (e.g. for one-liner
        # methods) and a single \ after the semicolon (for macros)
        endpos = endline.find(';')
        if not Match(r';[\s}]*(\\?)$', endline[endpos:]):
          # Semicolon isn't the last character, there's something trailing.
          # Output a warning if the semicolon is not contained inside
          # a lambda expression.
          if not Match(r'^[^{};]*\[[^\[\]]*\][^{}]*\{[^{}]*\}\s*\)*[;,]\s*$',
                       endline):
            error(filename, linenum, 'readability/braces', 4,
                  'If/else bodies with multiple statements require braces')
        elif endlinenum < len(clean_lines.elided) - 1:
          # Make sure the next line is dedented
          next_line = clean_lines.elided[endlinenum + 1]
          next_indent = GetIndentLevel(next_line)
          # With ambiguous nested if statements, this will error out on the
          # if that *doesn't* match the else, regardless of whether it's the
          # inner one or outer one.
          if (if_match and Match(r'\s*else\b', next_line)
              and next_indent != if_indent):
            error(filename, linenum, 'readability/braces', 4,
                  'Else clause should be indented at the same level as if. '
                  'Ambiguous nested if/else chains require braces.')
          elif next_indent > if_indent:
            error(filename, linenum, 'readability/braces', 4,
                  'If/else bodies with multiple statements require braces')
def CheckTrailingSemicolon(filename, clean_lines, linenum, error):
  """Looks for redundant trailing semicolon.

  Args:
    filename: The name of the current file.
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
    error: The function to call with any errors found.
  """
  line = clean_lines.elided[linenum]
  # Block bodies should not be followed by a semicolon.  Due to C++11
  # brace initialization, there are more places where semicolons are
  # required than not, so we use a whitelist approach to check these
  # rather than a blacklist.  These are the places where "};" should
  # be replaced by just "}":
  # 1. Some flavor of block following closing parenthesis:
  #    for (;;) {};
  #    while (...) {};
  #    switch (...) {};
  #    Function(...) {};
  #    if (...) {};
  #    if (...) else if (...) {};
  #
  # 2. else block:
  #    if (...) else {};
  #
  # 3. const member function:
  #    Function(...) const {};
  #
  # 4. Block following some statement:
  #    x = 42;
  #    {};
  #
  # 5. Block at the beginning of a function:
  #    Function(...) {
  #      {};
  #    }
  #
  #    Note that naively checking for the preceding "{" will also match
  #    braces inside multi-dimensional arrays, but this is fine since
  #    that expression will not contain semicolons.
  #
  # 6. Block following another block:
  #    while (true) {}
  #    {};
  #
  # 7. End of namespaces:
  #    namespace {};
  #
  #    These semicolons seems far more common than other kinds of
  #    redundant semicolons, possibly due to people converting classes
  #    to namespaces.  For now we do not warn for this case.
  #
  # Try matching case 1 first.
  match = Match(r'^(.*\)\s*)\{', line)
  if match:
    # Matched closing parenthesis (case 1).  Check the token before the
    # matching opening parenthesis, and don't warn if it looks like a
    # macro.  This avoids these false positives:
    #  - macro that defines a base class
    #  - multi-line macro that defines a base class
    #  - macro that defines the whole class-head
    #
    # But we still issue warnings for macros that we know are safe to
    # warn, specifically:
    #  - TEST, TEST_F, TEST_P, MATCHER, MATCHER_P
    #  - TYPED_TEST
    #  - INTERFACE_DEF
    #  - EXCLUSIVE_LOCKS_REQUIRED, SHARED_LOCKS_REQUIRED, LOCKS_EXCLUDED:
    #
    # We implement a whitelist of safe macros instead of a blacklist of
    # unsafe macros, even though the latter appears less frequently in
    # google code and would have been easier to implement.  This is because
    # the downside for getting the whitelist wrong means some extra
    # semicolons, while the downside for getting the blacklist wrong
    # would result in compile errors.
    #
    # In addition to macros, we also don't want to warn on compound
    # literals and lambdas.
    closing_brace_pos = match.group(1).rfind(')')
    # opening_parenthesis is a (line_text, line_number, column) tuple
    # locating the '(' that matches the ')' found above.
    opening_parenthesis = ReverseCloseExpression(
        clean_lines, linenum, closing_brace_pos)
    if opening_parenthesis[2] > -1:
      line_prefix = opening_parenthesis[0][0:opening_parenthesis[2]]
      macro = Search(r'\b([A-Z_]+)\s*$', line_prefix)
      func = Match(r'^(.*\])\s*$', line_prefix)
      # Setting match to None below suppresses the warning for this line.
      if ((macro and
           macro.group(1) not in (
               'TEST', 'TEST_F', 'MATCHER', 'MATCHER_P', 'TYPED_TEST',
               'EXCLUSIVE_LOCKS_REQUIRED', 'SHARED_LOCKS_REQUIRED',
               'LOCKS_EXCLUDED', 'INTERFACE_DEF')) or
          (func and not Search(r'\boperator\s*\[\s*\]', func.group(1))) or
          Search(r'\s+=\s*$', line_prefix)):
        match = None
    if (match and
        opening_parenthesis[1] > 1 and
        Search(r'\]\s*$', clean_lines.elided[opening_parenthesis[1] - 1])):
      # Multi-line lambda-expression
      match = None
  else:
    # Try matching cases 2-3.
    match = Match(r'^(.*(?:else|\)\s*const)\s*)\{', line)
    if not match:
      # Try matching cases 4-6.  These are always matched on separate lines.
      #
      # Note that we can't simply concatenate the previous line to the
      # current line and do a single match, otherwise we may output
      # duplicate warnings for the blank line case:
      #   if (cond) {
      #     // blank line
      #   }
      prevline = GetPreviousNonBlankLine(clean_lines, linenum)[0]
      if prevline and Search(r'[;{}]\s*$', prevline):
        match = Match(r'^(\s*)\{', line)
  # Check matching closing brace
  if match:
    (endline, endlinenum, endpos) = CloseExpression(
        clean_lines, linenum, len(match.group(1)))
    if endpos > -1 and Match(r'^\s*;', endline[endpos:]):
      # Current {} pair is eligible for semicolon check, and we have found
      # the redundant semicolon, output warning here.
      #
      # Note: because we are scanning forward for opening braces, and
      # outputting warnings for the matching closing brace, if there are
      # nested blocks with trailing semicolons, we will get the error
      # messages in reversed order.
      error(filename, endlinenum, 'readability/braces', 4,
            "You don't need a ; after a }")
def CheckEmptyBlockBody(filename, clean_lines, linenum, error):
  """Look for empty loop/conditional body with only a single semicolon.

  Args:
    filename: The name of the current file.
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
    error: The function to call with any errors found.
  """
  # Search for loop keywords at the beginning of the line.  Because only
  # whitespaces are allowed before the keywords, this will also ignore most
  # do-while-loops, since those lines should start with closing brace.
  #
  # We also check "if" blocks here, since an empty conditional block
  # is likely an error.
  line = clean_lines.elided[linenum]
  keyword = Match(r'\s*(for|while|if)\s*\(', line)
  if not keyword:
    return
  # Find the end of the conditional expression.
  end_line, end_linenum, end_pos = CloseExpression(
      clean_lines, linenum, line.find('('))
  # Output warning only if what follows the condition expression is a
  # semicolon.  Whitespace/newline cases are covered by a separate check
  # for semicolons preceded by whitespace.
  if end_pos < 0 or not Match(r';', end_line[end_pos:]):
    return
  if keyword.group(1) == 'if':
    error(filename, end_linenum, 'whitespace/empty_conditional_body', 5,
          'Empty conditional bodies should use {}')
  else:
    error(filename, end_linenum, 'whitespace/empty_loop_body', 5,
          'Empty loop bodies should use {} or continue')
def FindCheckMacro(line):
  """Find a replaceable CHECK-like macro.

  Args:
    line: line to search on.
  Returns:
    (macro name, start position), or (None, -1) if no replaceable
    macro is found.
  """
  for macro in _CHECK_MACROS:
    if line.find(macro) < 0:
      continue
    # A plain substring hit is not enough: confirm with a regular
    # expression so that a macro merely containing the CHECK substring
    # (e.g. "MY_CHECK(") does not count as a match.
    matched = Match(r'^(.*\b' + macro + r'\s*)\(', line)
    if matched:
      return (macro, len(matched.group(1)))
  return (None, -1)
def CheckCheck(filename, clean_lines, linenum, error):
  """Checks the use of CHECK and EXPECT macros.

  Args:
    filename: The name of the current file.
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
    error: The function to call with any errors found.
  """
  # Decide the set of replacement macros that should be suggested
  lines = clean_lines.elided
  (check_macro, start_pos) = FindCheckMacro(lines[linenum])
  if not check_macro:
    return
  # Find end of the boolean expression by matching parentheses.
  # Note: despite its name, end_line is a line *number* here.
  (last_line, end_line, end_pos) = CloseExpression(
      clean_lines, linenum, start_pos)
  if end_pos < 0:
    return
  # If the check macro is followed by something other than a
  # semicolon, assume users will log their own custom error messages
  # and don't suggest any replacements.
  if not Match(r'\s*;', last_line[end_pos:]):
    return
  # Collect the full (possibly multi-line) argument text, excluding the
  # enclosing parentheses.
  if linenum == end_line:
    expression = lines[linenum][start_pos + 1:end_pos - 1]
  else:
    expression = lines[linenum][start_pos + 1:]
    for i in xrange(linenum + 1, end_line):
      expression += lines[i]
    expression += last_line[0:end_pos - 1]
  # Parse expression so that we can take parentheses into account.
  # This avoids false positives for inputs like "CHECK((a < 4) == b)",
  # which is not replaceable by CHECK_LE.
  lhs = ''
  rhs = ''
  operator = None
  # Consume the expression token by token, accumulating everything before
  # the first top-level relational operator into lhs and the rest into rhs.
  while expression:
    matched = Match(r'^\s*(<<|<<=|>>|>>=|->\*|->|&&|\|\||'
                    r'==|!=|>=|>|<=|<|\()(.*)$', expression)
    if matched:
      token = matched.group(1)
      if token == '(':
        # Parenthesized operand
        expression = matched.group(2)
        (end, _) = FindEndOfExpressionInLine(expression, 0, ['('])
        if end < 0:
          return  # Unmatched parenthesis
        lhs += '(' + expression[0:end]
        expression = expression[end:]
      elif token in ('&&', '||'):
        # Logical and/or operators.  This means the expression
        # contains more than one term, for example:
        #   CHECK(42 < a && a < b);
        #
        # These are not replaceable with CHECK_LE, so bail out early.
        return
      elif token in ('<<', '<<=', '>>', '>>=', '->*', '->'):
        # Non-relational operator
        lhs += token
        expression = matched.group(2)
      else:
        # Relational operator
        operator = token
        rhs = matched.group(2)
        break
    else:
      # Unparenthesized operand.  Instead of appending to lhs one character
      # at a time, we do another regular expression match to consume several
      # characters at once if possible.  Trivial benchmark shows that this
      # is more efficient when the operands are longer than a single
      # character, which is generally the case.
      matched = Match(r'^([^-=!<>()&|]+)(.*)$', expression)
      if not matched:
        matched = Match(r'^(\s*\S)(.*)$', expression)
        if not matched:
          break
      lhs += matched.group(1)
      expression = matched.group(2)
  # Only apply checks if we got all parts of the boolean expression
  if not (lhs and operator and rhs):
    return
  # Check that rhs do not contain logical operators.  We already know
  # that lhs is fine since the loop above parses out && and ||.
  if rhs.find('&&') > -1 or rhs.find('||') > -1:
    return
  # At least one of the operands must be a constant literal.  This is
  # to avoid suggesting replacements for unprintable things like
  # CHECK(variable != iterator)
  #
  # The following pattern matches decimal, hex integers, strings, and
  # characters (in that order).
  lhs = lhs.strip()
  rhs = rhs.strip()
  match_constant = r'^([-+]?(\d+|0[xX][0-9a-fA-F]+)[lLuU]{0,3}|".*"|\'.*\')$'
  if Match(match_constant, lhs) or Match(match_constant, rhs):
    # Note: since we know both lhs and rhs, we can provide a more
    # descriptive error message like:
    #   Consider using CHECK_EQ(x, 42) instead of CHECK(x == 42)
    # Instead of:
    #   Consider using CHECK_EQ instead of CHECK(a == b)
    #
    # We are still keeping the less descriptive message because if lhs
    # or rhs gets long, the error message might become unreadable.
    error(filename, linenum, 'readability/check', 2,
          'Consider using %s instead of %s(a %s b)' % (
              _CHECK_REPLACEMENT[check_macro][operator],
              check_macro, operator))
def CheckAltTokens(filename, clean_lines, linenum, error):
  """Check alternative keywords being used in boolean expressions.

  Args:
    filename: The name of the current file.
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
    error: The function to call with any errors found.
  """
  line = clean_lines.elided[linenum]
  # Avoid preprocessor lines
  if Match(r'^\s*#', line):
    return
  # Last ditch effort to avoid multi-line comments.  This will not help
  # if the comment started before the current line or ended after the
  # current line, but it catches most of the false positives.  At least,
  # it provides a way to workaround this warning for people who use
  # multi-line comments in preprocessor macros.
  #
  # TODO(unknown): remove this once cpplint has better support for
  # multi-line comments.
  if '/*' in line or '*/' in line:
    return
  for hit in _ALT_TOKEN_REPLACEMENT_PATTERN.finditer(line):
    token = hit.group(1)
    error(filename, linenum, 'readability/alt_tokens', 2,
          'Use operator %s instead of %s' % (
              _ALT_TOKEN_REPLACEMENT[token], token))
def GetLineWidth(line):
  """Determines the width of the line in column positions.

  Args:
    line: A string, which may be a Unicode string.
  Returns:
    The width of the line in column positions, accounting for Unicode
    combining characters and wide characters.
  """
  if not isinstance(line, unicode):
    # Plain (byte) string: one column per character.
    return len(line)
  # Unicode string: wide/fullwidth characters occupy two columns,
  # combining marks occupy none, everything else occupies one.
  total = 0
  for ch in unicodedata.normalize('NFC', line):
    if unicodedata.east_asian_width(ch) in ('W', 'F'):
      total += 2
    elif not unicodedata.combining(ch):
      total += 1
  return total
def CheckStyle(filename, clean_lines, linenum, file_extension, nesting_state,
               error):
  """Checks rules from the 'C++ style rules' section of cppguide.html.

  Most of these rules are hard to test (naming, comment style), but we
  do what we can. In particular we check for 2-space indents, line lengths,
  tab usage, spaces inside code, etc.

  Args:
    filename: The name of the current file.
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
    file_extension: The extension (without the dot) of the filename.
    nesting_state: A NestingState instance which maintains information about
                   the current stack of nested blocks being parsed.
    error: The function to call with any errors found.
  """
  # Don't use "elided" lines here, otherwise we can't check commented lines.
  # Don't want to use "raw" either, because we don't want to check inside C++11
  # raw strings,
  raw_lines = clean_lines.lines_without_raw_strings
  line = raw_lines[linenum]
  if line.find('\t') != -1:
    error(filename, linenum, 'whitespace/tab', 1,
          'Tab found; better to use spaces')
  # One or three blank spaces at the beginning of the line is weird; it's
  # hard to reconcile that with 2-space indents.
  # NOTE: here are the conditions rob pike used for his tests. Mine aren't
  # as sophisticated, but it may be worth becoming so: RLENGTH==initial_spaces
  # if(RLENGTH > 20) complain = 0;
  # if(match($0, " +(error|private|public|protected):")) complain = 0;
  # if(match(prev, "&& *$")) complain = 0;
  # if(match(prev, "\\|\\| *$")) complain = 0;
  # if(match(prev, "[\",=><] *$")) complain = 0;
  # if(match($0, " <<")) complain = 0;
  # if(match(prev, " +for \\(")) complain = 0;
  # if(prevodd && match(prevprev, " +for \\(")) complain = 0;
  # Pattern for scope/section labels ("public:", "label:", possibly ending
  # with a backslash inside a macro); such lines are exempted from the
  # odd-indentation check below.
  scope_or_label_pattern = r'\s*\w+\s*:\s*\\?$'
  # NOTE(review): this classinfo value is unused; it is recomputed near the
  # bottom of this function before its only use.
  classinfo = nesting_state.InnermostClass()
  initial_spaces = 0
  cleansed_line = clean_lines.elided[linenum]
  while initial_spaces < len(line) and line[initial_spaces] == ' ':
    initial_spaces += 1
  if line and line[-1].isspace():
    error(filename, linenum, 'whitespace/end_of_line', 4,
          'Line ends in whitespace. Consider deleting these extra spaces.')
  # There are certain situations we allow one space, notably for
  # section labels, and also lines containing multi-line raw strings.
  elif ((initial_spaces == 1 or initial_spaces == 3) and
        not Match(scope_or_label_pattern, cleansed_line) and
        not (clean_lines.raw_lines[linenum] != line and
             Match(r'^\s*""', line))):
    error(filename, linenum, 'whitespace/indent', 3,
          'Weird number of spaces at line-start. '
          'Are you using a 2-space indent?')
  # Check if the line is a header guard.
  is_header_guard = False
  if file_extension == 'h':
    cppvar = GetHeaderGuardCPPVariable(filename)
    if (line.startswith('#ifndef %s' % cppvar) or
        line.startswith('#define %s' % cppvar) or
        line.startswith('#endif // %s' % cppvar)):
      is_header_guard = True
  # #include lines and header guards can be long, since there's no clean way to
  # split them.
  #
  # URLs can be long too. It's possible to split these, but it makes them
  # harder to cut&paste.
  #
  # The "$Id:...$" comment may also get very long without it being the
  # developers fault.
  if (not line.startswith('#include') and not is_header_guard and
      not Match(r'^\s*//.*http(s?)://\S*$', line) and
      not Match(r'^// \$Id:.*#[0-9]+ \$$', line)):
    line_width = GetLineWidth(line)
    # Lines over 125% of the configured limit get a higher-severity error.
    extended_length = int((_line_length * 1.25))
    if line_width > extended_length:
      error(filename, linenum, 'whitespace/line_length', 4,
            'Lines should very rarely be longer than %i characters' %
            extended_length)
    elif line_width > _line_length:
      error(filename, linenum, 'whitespace/line_length', 2,
            'Lines should be <= %i characters long' % _line_length)
  # Flag multiple statements on one line, with carve-outs for for-loops
  # (two semicolons are legitimate) and compact one-line switch cases.
  if (cleansed_line.count(';') > 1 and
      # for loops are allowed two ;'s (and may run over two lines).
      cleansed_line.find('for') == -1 and
      (GetPreviousNonBlankLine(clean_lines, linenum)[0].find('for') == -1 or
       GetPreviousNonBlankLine(clean_lines, linenum)[0].find(';') != -1) and
      # It's ok to have many commands in a switch case that fits in 1 line
      not ((cleansed_line.find('case ') != -1 or
            cleansed_line.find('default:') != -1) and
           cleansed_line.find('break;') != -1)):
    error(filename, linenum, 'whitespace/newline', 0,
          'More than one command on the same line')
  # Some more style checks
  CheckBraces(filename, clean_lines, linenum, error)
  CheckTrailingSemicolon(filename, clean_lines, linenum, error)
  CheckEmptyBlockBody(filename, clean_lines, linenum, error)
  CheckAccess(filename, clean_lines, linenum, nesting_state, error)
  CheckSpacing(filename, clean_lines, linenum, nesting_state, error)
  CheckOperatorSpacing(filename, clean_lines, linenum, error)
  CheckParenthesisSpacing(filename, clean_lines, linenum, error)
  CheckCommaSpacing(filename, clean_lines, linenum, error)
  CheckBracesSpacing(filename, clean_lines, linenum, error)
  CheckSpacingForFunctionCall(filename, clean_lines, linenum, error)
  CheckRValueReference(filename, clean_lines, linenum, nesting_state, error)
  CheckCheck(filename, clean_lines, linenum, error)
  CheckAltTokens(filename, clean_lines, linenum, error)
  classinfo = nesting_state.InnermostClass()
  if classinfo:
    CheckSectionSpacing(filename, clean_lines, classinfo, linenum, error)
# Matches a #include line; group(1) is the opening delimiter ('<' or '"')
# and group(2) is the header path.
_RE_PATTERN_INCLUDE = re.compile(r'^\s*#\s*include\s*([<"])([^>"]*)[>"].*$')
# Matches the first component of a filename delimited by -s and _s. That is:
#  _RE_FIRST_COMPONENT.match('foo').group(0) == 'foo'
#  _RE_FIRST_COMPONENT.match('foo.cc').group(0) == 'foo'
#  _RE_FIRST_COMPONENT.match('foo-bar_baz.cc').group(0) == 'foo'
#  _RE_FIRST_COMPONENT.match('foo_bar-baz.cc').group(0) == 'foo'
_RE_FIRST_COMPONENT = re.compile(r'^[^-_.]+')
def _DropCommonSuffixes(filename):
"""Drops common suffixes like _test.cc or -inl.h from filename.
For example:
>>> _DropCommonSuffixes('foo/foo-inl.h')
'foo/foo'
>>> _DropCommonSuffixes('foo/bar/foo.cc')
'foo/bar/foo'
>>> _DropCommonSuffixes('foo/foo_internal.h')
'foo/foo'
>>> _DropCommonSuffixes('foo/foo_unusualinternal.h')
'foo/foo_unusualinternal'
Args:
filename: The input filename.
Returns:
The filename with the common suffix removed.
"""
for suffix in ('test.cc', 'regtest.cc', 'unittest.cc',
'inl.h', 'impl.h', 'internal.h'):
if (filename.endswith(suffix) and len(filename) > len(suffix) and
filename[-len(suffix) - 1] in ('-', '_')):
return filename[:-len(suffix) - 1]
return os.path.splitext(filename)[0]
def _IsTestFilename(filename):
"""Determines if the given filename has a suffix that identifies it as a test.
Args:
filename: The input filename.
Returns:
True if 'filename' looks like a test, False otherwise.
"""
if (filename.endswith('_test.cc') or
filename.endswith('_unittest.cc') or
filename.endswith('_regtest.cc')):
return True
else:
return False
def _ClassifyInclude(fileinfo, include, is_system):
  """Figures out what kind of header 'include' is.

  Args:
    fileinfo: The current file cpplint is running over. A FileInfo instance.
    include: The path to a #included file.
    is_system: True if the #include used <> rather than "".

  Returns:
    One of the _XXX_HEADER constants.

  For example:
    >>> _ClassifyInclude(FileInfo('foo/foo.cc'), 'stdio.h', True)
    _C_SYS_HEADER
    >>> _ClassifyInclude(FileInfo('foo/foo.cc'), 'string', True)
    _CPP_SYS_HEADER
    >>> _ClassifyInclude(FileInfo('foo/foo.cc'), 'foo/foo.h', False)
    _LIKELY_MY_HEADER
    >>> _ClassifyInclude(FileInfo('foo/foo_unknown_extension.cc'),
    ...                  'bar/foo_other_ext.h', False)
    _POSSIBLE_MY_HEADER
    >>> _ClassifyInclude(FileInfo('foo/foo.cc'), 'foo/bar.h', False)
    _OTHER_HEADER
  """
  # System headers split into C and C++; _CPP_HEADERS lists all standard
  # C++ headers not already covered by earlier checks.
  if is_system:
    if include in _CPP_HEADERS:
      return _CPP_SYS_HEADER
    return _C_SYS_HEADER

  # An include that shares the target file's basename (after dropping
  # common suffixes) and lives in the same directory -- or in the sibling
  # '../public' directory -- is likely owned by the target file.
  target_dir, target_base = os.path.split(
      _DropCommonSuffixes(fileinfo.RepositoryName()))
  include_dir, include_base = os.path.split(_DropCommonSuffixes(include))
  if target_base == include_base and (
      include_dir == target_dir or
      include_dir == os.path.normpath(target_dir + '/../public')):
    return _LIKELY_MY_HEADER

  # A shared first basename component (text before the first '-', '_' or
  # '.') suggests the target may implement the include, so it is allowed
  # to come first -- but its absence is never flagged.
  target_first = _RE_FIRST_COMPONENT.match(target_base)
  include_first = _RE_FIRST_COMPONENT.match(include_base)
  if (target_first and include_first and
      target_first.group(0) == include_first.group(0)):
    return _POSSIBLE_MY_HEADER

  return _OTHER_HEADER
def CheckIncludeLine(filename, clean_lines, linenum, include_state, error):
  """Check rules that are applicable to #include lines.

  Strings on #include lines are NOT removed from elided line, to make
  certain tasks easier. However, to prevent false positives, checks
  applicable to #include lines in CheckLanguage must be put here.

  Args:
    filename: The name of the current file.
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
    include_state: An _IncludeState instance in which the headers are inserted.
    error: The function to call with any errors found.
  """
  fileinfo = FileInfo(filename)
  line = clean_lines.lines[linenum]
  # "include" should use the new style "foo/bar.h" instead of just "bar.h"
  # Only do this check if the included header follows google naming
  # conventions. If not, assume that it's a 3rd party API that
  # requires special include conventions.
  #
  # We also make an exception for Lua headers, which follow google
  # naming convention but not the include convention.
  match = Match(r'#include\s*"([^/]+\.h)"', line)
  if match and not _THIRD_PARTY_HEADERS_PATTERN.match(match.group(1)):
    error(filename, linenum, 'build/include', 4,
          'Include the directory when naming .h files')
  # we shouldn't include a file more than once. actually, there are a
  # handful of instances where doing so is okay, but in general it's
  # not.
  match = _RE_PATTERN_INCLUDE.search(line)
  if match:
    include = match.group(2)
    is_system = (match.group(1) == '<')
    # A non-negative result from FindHeader is the line number where this
    # header was already included.
    duplicate_line = include_state.FindHeader(include)
    if duplicate_line >= 0:
      error(filename, linenum, 'build/include', 4,
            '"%s" already included at %s:%s' %
            (include, filename, duplicate_line))
    elif not _THIRD_PARTY_HEADERS_PATTERN.match(include):
      include_state.include_list[-1].append((include, linenum))
      # We want to ensure that headers appear in the right order:
      # 1) for foo.cc, foo.h  (preferred location)
      # 2) c system files
      # 3) cpp system files
      # 4) for foo.cc, foo.h  (deprecated location)
      # 5) other google headers
      #
      # We classify each include statement as one of those 5 types
      # using a number of techniques. The include_state object keeps
      # track of the highest type seen, and complains if we see a
      # lower type after that.
      error_message = include_state.CheckNextIncludeOrder(
          _ClassifyInclude(fileinfo, include, is_system))
      if error_message:
        error(filename, linenum, 'build/include_order', 4,
              '%s. Should be: %s.h, c system, c++ system, other.' %
              (error_message, fileinfo.BaseName()))
      canonical_include = include_state.CanonicalizeAlphabeticalOrder(include)
      if not include_state.IsInAlphabeticalOrder(
          clean_lines, linenum, canonical_include):
        error(filename, linenum, 'build/include_alpha', 4,
              'Include "%s" not in alphabetical order' % include)
      include_state.SetLastHeader(canonical_include)
  # Look for any of the stream classes that are part of standard C++.
  match = _RE_PATTERN_INCLUDE.match(line)
  if match:
    include = match.group(2)
    if Match(r'(f|ind|io|i|o|parse|pf|stdio|str|)?stream$', include):
      # Many unit tests use cout, so we exempt them.
      if not _IsTestFilename(filename):
        # Suggest a different header for ostream
        if include == 'ostream':
          error(filename, linenum, 'readability/streams', 3,
                'For logging, include "base/logging.h" instead of <ostream>.')
        else:
          error(filename, linenum, 'readability/streams', 3,
                'Streams are highly discouraged.')
def _GetTextInside(text, start_pattern):
r"""Retrieves all the text between matching open and close parentheses.
Given a string of lines and a regular expression string, retrieve all the text
following the expression and between opening punctuation symbols like
(, [, or {, and the matching close-punctuation symbol. This properly nested
occurrences of the punctuations, so for the text like
printf(a(), b(c()));
a call to _GetTextInside(text, r'printf\(') will return 'a(), b(c())'.
start_pattern must match string having an open punctuation symbol at the end.
Args:
text: The lines to extract text. Its comments and strings must be elided.
It can be single line and can span multiple lines.
start_pattern: The regexp string indicating where to start extracting
the text.
Returns:
The extracted text.
None if either the opening string or ending punctuation could not be found.
"""
# TODO(unknown): Audit cpplint.py to see what places could be profitably
# rewritten to use _GetTextInside (and use inferior regexp matching today).
# Give opening punctuations to get the matching close-punctuations.
matching_punctuation = {'(': ')', '{': '}', '[': ']'}
closing_punctuation = set(matching_punctuation.itervalues())
# Find the position to start extracting text.
match = re.search(start_pattern, text, re.M)
if not match: # start_pattern not found in text.
return None
start_position = match.end(0)
assert start_position > 0, (
'start_pattern must ends with an opening punctuation.')
assert text[start_position - 1] in matching_punctuation, (
'start_pattern must ends with an opening punctuation.')
# Stack of closing punctuations we expect to have in text after position.
punctuation_stack = [matching_punctuation[text[start_position - 1]]]
position = start_position
while punctuation_stack and position < len(text):
if text[position] == punctuation_stack[-1]:
punctuation_stack.pop()
elif text[position] in closing_punctuation:
# A closing punctuation without matching opening punctuations.
return None
elif text[position] in matching_punctuation:
punctuation_stack.append(matching_punctuation[text[position]])
position += 1
if punctuation_stack:
# Opening punctuations left without matching close-punctuations.
return None
# punctuations match.
return text[start_position:position - 1]
# Patterns for matching call-by-reference parameters.
#
# Supports nested templates up to 2 levels deep using this messy pattern:
#   < (?: < (?: < [^<>]*
#               >
#           | [^<>] )*
#         >
#       | [^<>] )*
#   >
# A C/C++ identifier.
_RE_PATTERN_IDENT = r'[_a-zA-Z]\w*'  # =~ [[:alpha:]][[:alnum:]]*
# A (possibly const/elaborated) type name, allowing '::' scoping and up to
# two levels of template nesting (see diagram above).
_RE_PATTERN_TYPE = (
    r'(?:const\s+)?(?:typename\s+|class\s+|struct\s+|union\s+|enum\s+)?'
    r'(?:\w|'
    r'\s*<(?:<(?:<[^<>]*>|[^<>])*>|[^<>])*>|'
    r'::)+')
# A call-by-reference parameter ends with '& identifier'.
_RE_PATTERN_REF_PARAM = re.compile(
    r'(' + _RE_PATTERN_TYPE + r'(?:\s*(?:\bconst\b|[*]))*\s*'
    r'&\s*' + _RE_PATTERN_IDENT + r')\s*(?:=[^,()]+)?[,)]')
# A call-by-const-reference parameter either ends with 'const& identifier'
# or looks like 'const type& identifier' when 'type' is atomic.
_RE_PATTERN_CONST_REF_PARAM = (
    r'(?:.*\s*\bconst\s*&\s*' + _RE_PATTERN_IDENT +
    r'|const\s+' + _RE_PATTERN_TYPE + r'\s*&\s*' + _RE_PATTERN_IDENT + r')')
def CheckLanguage(filename, clean_lines, linenum, file_extension,
                  include_state, nesting_state, error):
  """Checks rules from the 'C++ language rules' section of cppguide.html.

  Some of these rules are hard to test (function overloading, using
  uint32 inappropriately), but we do the best we can.

  Args:
    filename: The name of the current file.
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
    file_extension: The extension (without the dot) of the filename.
    include_state: An _IncludeState instance in which the headers are inserted.
    nesting_state: A NestingState instance which maintains information about
                   the current stack of nested blocks being parsed.
    error: The function to call with any errors found.
  """
  # If the line is empty or consists of entirely a comment, no need to
  # check it.
  line = clean_lines.elided[linenum]
  if not line:
    return
  match = _RE_PATTERN_INCLUDE.search(line)
  if match:
    CheckIncludeLine(filename, clean_lines, linenum, include_state, error)
    return
  # Reset include state across preprocessor directives. This is meant
  # to silence warnings for conditional includes.
  match = Match(r'^\s*#\s*(if|ifdef|ifndef|elif|else|endif)\b', line)
  if match:
    include_state.ResetSection(match.group(1))
  # Make Windows paths like Unix.
  # NOTE(review): 'fullname' is computed but never used in this function
  # body -- confirm whether it can be removed.
  fullname = os.path.abspath(filename).replace('\\', '/')
  # Perform other checks now that we are sure that this is not an include line
  CheckCasts(filename, clean_lines, linenum, error)
  CheckGlobalStatic(filename, clean_lines, linenum, error)
  CheckPrintf(filename, clean_lines, linenum, error)
  if file_extension == 'h':
    # TODO(unknown): check that 1-arg constructors are explicit.
    #                How to tell it's a constructor?
    #                (handled in CheckForNonStandardConstructs for now)
    # TODO(unknown): check that classes declare or disable copy/assign
    #                (level 1 error)
    pass
  # Check if people are using the verboten C basic types. The only exception
  # we regularly allow is "unsigned short port" for port.
  if Search(r'\bshort port\b', line):
    if not Search(r'\bunsigned short port\b', line):
      error(filename, linenum, 'runtime/int', 4,
            'Use "unsigned short" for ports, not "short"')
  else:
    match = Search(r'\b(short|long(?! +double)|long long)\b', line)
    if match:
      error(filename, linenum, 'runtime/int', 4,
            'Use int16/int64/etc, rather than the C type %s' % match.group(1))
  # Check if some verboten operator overloading is going on
  # TODO(unknown): catch out-of-line unary operator&:
  #   class X {};
  #   int operator&(const X& x) { return 42; }  // unary operator&
  # The trick is it's hard to tell apart from binary operator&:
  #   class Y { int operator&(const Y& x) { return 23; } };  // binary operator&
  if Search(r'\boperator\s*&\s*\(\s*\)', line):
    error(filename, linenum, 'runtime/operator', 4,
          'Unary operator& is dangerous. Do not use it.')
  # Check for suspicious usage of "if" like
  # } if (a == b) {
  if Search(r'\}\s*if\s*\(', line):
    error(filename, linenum, 'readability/braces', 4,
          'Did you mean "else if"? If not, start a new line for "if".')
  # Check for potential format string bugs like printf(foo).
  # We constrain the pattern not to pick things like DocidForPrintf(foo).
  # Not perfect but it can catch printf(foo.c_str()) and printf(foo->c_str())
  # TODO(unknown): Catch the following case. Need to change the calling
  # convention of the whole function to process multiple line to handle it.
  #   printf(
  #       boy_this_is_a_really_long_variable_that_cannot_fit_on_the_prev_line);
  printf_args = _GetTextInside(line, r'(?i)\b(string)?printf\s*\(')
  if printf_args:
    match = Match(r'([\w.\->()]+)$', printf_args)
    if match and match.group(1) != '__VA_ARGS__':
      function_name = re.search(r'\b((?:string)?printf)\s*\(',
                                line, re.I).group(1)
      error(filename, linenum, 'runtime/printf', 4,
            'Potential format string bug. Do %s("%%s", %s) instead.'
            % (function_name, match.group(1)))
  # Check for potential memset bugs like memset(buf, sizeof(buf), 0).
  match = Search(r'memset\s*\(([^,]*),\s*([^,]*),\s*0\s*\)', line)
  if match and not Match(r"^''|-?[0-9]+|0x[0-9A-Fa-f]$", match.group(2)):
    error(filename, linenum, 'runtime/memset', 4,
          'Did you mean "memset(%s, 0, %s)"?'
          % (match.group(1), match.group(2)))
  if Search(r'\busing namespace\b', line):
    error(filename, linenum, 'build/namespaces', 5,
          'Do not use namespace using-directives. '
          'Use using-declarations instead.')
  # Detect variable-length arrays.
  match = Match(r'\s*(.+::)?(\w+) [a-z]\w*\[(.+)];', line)
  if (match and match.group(2) != 'return' and match.group(2) != 'delete' and
      # A ']' inside the captured size expression means the pattern spanned
      # multiple bracket pairs (e.g. 'a[1][2]'); skip to avoid mis-parsing.
      match.group(3).find(']') == -1):
    # Split the size using space and arithmetic operators as delimiters.
    # If any of the resulting tokens are not compile time constants then
    # report the error.
    # NOTE(review): the trailing ']' in this split pattern looks unintended
    # (it also makes ']' a delimiter) -- confirm before changing.
    tokens = re.split(r'\s|\+|\-|\*|\/|<<|>>]', match.group(3))
    is_const = True
    skip_next = False
    for tok in tokens:
      if skip_next:
        skip_next = False
        continue
      if Search(r'sizeof\(.+\)', tok): continue
      if Search(r'arraysize\(\w+\)', tok): continue
      tok = tok.lstrip('(')
      tok = tok.rstrip(')')
      if not tok: continue
      if Match(r'\d+', tok): continue
      if Match(r'0[xX][0-9a-fA-F]+', tok): continue
      if Match(r'k[A-Z0-9]\w*', tok): continue
      if Match(r'(.+::)?k[A-Z0-9]\w*', tok): continue
      if Match(r'(.+::)?[A-Z][A-Z0-9_]*', tok): continue
      # A catch all for tricky sizeof cases, including 'sizeof expression',
      # 'sizeof(*type)', 'sizeof(const type)', 'sizeof(struct StructName)'
      # requires skipping the next token because we split on ' ' and '*'.
      if tok.startswith('sizeof'):
        skip_next = True
        continue
      is_const = False
      break
    if not is_const:
      error(filename, linenum, 'runtime/arrays', 1,
            'Do not use variable-length arrays. Use an appropriately named '
            "('k' followed by CamelCase) compile-time constant for the size.")
  # If DISALLOW_COPY_AND_ASSIGN DISALLOW_IMPLICIT_CONSTRUCTORS is present,
  # then it should be the last thing in the class declaration.
  match = Match(
      (r'\s*'
       r'(DISALLOW_(COPY_AND_ASSIGN|IMPLICIT_CONSTRUCTORS))'
       r'\(.*\);$'),
      line)
  if match and linenum + 1 < clean_lines.NumLines():
    next_line = clean_lines.elided[linenum + 1]
    # We allow some, but not all, declarations of variables to be present
    # in the statement that defines the class. The [\w\*,\s]* fragment of
    # the regular expression below allows users to declare instances of
    # the class or pointers to instances, but not less common types such
    # as function pointers or arrays. It's a tradeoff between allowing
    # reasonable code and avoiding trying to parse more C++ using regexps.
    if not Search(r'^\s*}[\w\*,\s]*;', next_line):
      error(filename, linenum, 'readability/constructors', 3,
            match.group(1) + ' should be the last thing in the class')
  # Check for use of unnamed namespaces in header files. Registration
  # macros are typically OK, so we allow use of "namespace {" on lines
  # that end with backslashes.
  if (file_extension == 'h'
      and Search(r'\bnamespace\s*{', line)
      and line[-1] != '\\'):
    error(filename, linenum, 'build/namespaces', 4,
          'Do not use unnamed namespaces in header files. See '
          'http://google-styleguide.googlecode.com/svn/trunk/cppguide.xml#Namespaces'
          ' for more information.')
def CheckGlobalStatic(filename, clean_lines, linenum, error):
  """Check for unsafe global or static objects.

  Args:
    filename: The name of the current file.
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
    error: The function to call with any errors found.
  """
  line = clean_lines.elided[linenum]
  # Match two lines at a time to support multiline declarations
  if linenum + 1 < clean_lines.NumLines() and not Search(r'[;({]', line):
    line += clean_lines.elided[linenum + 1].strip()
  # Check for people declaring static/global STL strings at the top level.
  # This is dangerous because the C++ language does not guarantee that
  # globals with constructors are initialized before the first access.
  # group(1) captures the 'static const ' qualifiers, group(2) the variable
  # name, group(3) whatever follows the name.
  match = Match(
      r'((?:|static +)(?:|const +))string +([a-zA-Z0-9_:]+)\b(.*)',
      line)
  # Remove false positives:
  # - String pointers (as opposed to values).
  #    string *pointer
  #    const string *pointer
  #    string const *pointer
  #    string *const pointer
  #
  # - Functions and template specializations.
  #    string Function<Type>(...
  #    string Class<Type>::Method(...
  #
  # - Operators. These are matched separately because operator names
  #   cross non-word boundaries, and trying to match both operators
  #   and functions at the same time would decrease accuracy of
  #   matching identifiers.
  #    string Class::operator*()
  if (match and
      not Search(r'\bstring\b(\s+const)?\s*\*\s*(const\s+)?\w', line) and
      not Search(r'\boperator\W', line) and
      not Match(r'\s*(<.*>)?(::[a-zA-Z0-9_]+)*\s*\(([^"]|$)', match.group(3))):
    error(filename, linenum, 'runtime/string', 4,
          'For a static/global string constant, use a C style string instead: '
          '"%schar %s[]".' %
          (match.group(1), match.group(2)))
  # Catch 'member_(member_)' style self-initialization; the backreference
  # \1 requires the initializer to be the identical trailing-underscore name.
  if Search(r'\b([A-Za-z0-9_]*_)\(\1\)', line):
    error(filename, linenum, 'runtime/init', 4,
          'You seem to be initializing a member variable with itself.')
def CheckPrintf(filename, clean_lines, linenum, error):
  """Check for printf related issues.

  Flags literal size arguments to snprintf, and any use of the unsafe
  sprintf/strcpy/strcat functions.

  Args:
    filename: The name of the current file.
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
    error: The function to call with any errors found.
  """
  line = clean_lines.elided[linenum]

  # When snprintf is used, the second argument shouldn't be a literal.
  snprintf_match = Search(r'snprintf\s*\(([^,]*),\s*([0-9]*)\s*,', line)
  if snprintf_match and snprintf_match.group(2) != '0':
    # If 2nd arg is zero, snprintf is used to calculate size.
    error(filename, linenum, 'runtime/printf', 3,
          'If you can, use sizeof(%s) instead of %s as the 2nd arg '
          'to snprintf.' % (snprintf_match.group(1),
                            snprintf_match.group(2)))

  # Check if some verboten C functions are being used.
  if Search(r'\bsprintf\s*\(', line):
    error(filename, linenum, 'runtime/printf', 5,
          'Never use sprintf. Use snprintf instead.')
  unsafe_match = Search(r'\b(strcpy|strcat)\s*\(', line)
  if unsafe_match:
    error(filename, linenum, 'runtime/printf', 4,
          'Almost always, snprintf is better than %s'
          % unsafe_match.group(1))
def IsDerivedFunction(clean_lines, linenum):
  """Check if current line contains an inherited function.

  Args:
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.

  Returns:
    True if current line contains a function with "override"
    virt-specifier.
  """
  # Scan back a few lines for start of current function
  for i in xrange(linenum, max(-1, linenum - 10), -1):
    match = Match(r'^([^()]*\w+)\(', clean_lines.elided[i])
    if not match:
      continue
    # Look for "override" after the matching closing parenthesis
    line, _, closing_paren = CloseExpression(
        clean_lines, i, len(match.group(1)))
    return (closing_paren >= 0 and
            Search(r'\boverride\b', line[closing_paren:]))
  return False
def IsInitializerList(clean_lines, linenum):
  """Check if current line is inside constructor initializer list.

  Args:
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.

  Returns:
    True if current line appears to be inside constructor initializer
    list, False otherwise.
  """
  # Scan backwards looking for the start of the initializer list (a lone
  # ':') or evidence that we already left the previous scope.
  for i in xrange(linenum, 1, -1):
    line = clean_lines.elided[i]
    if i == linenum:
      # Strip a trailing function-body opener from the current line so the
      # patterns below only see the declaration part.
      remove_function_body = Match(r'^(.*)\{\s*$', line)
      if remove_function_body:
        line = remove_function_body.group(1)
    if Search(r'\s:\s*\w+[({]', line):
      # A lone colon tend to indicate the start of a constructor
      # initializer list. It could also be a ternary operator, which
      # also tend to appear in constructor initializer lists as
      # opposed to parameter lists.
      return True
    if Search(r'\}\s*,\s*$', line):
      # A closing brace followed by a comma is probably the end of a
      # brace-initialized member in constructor initializer list.
      return True
    if Search(r'[{};]\s*$', line):
      # Found one of the following:
      # - A closing brace or semicolon, probably the end of the previous
      #   function.
      # - An opening brace, probably the start of current class or namespace.
      #
      # Current line is probably not inside an initializer list since
      # we saw one of those things without seeing the starting colon.
      return False
  # Got to the beginning of the file without seeing the start of
  # constructor initializer list.
  return False
def CheckForNonConstReference(filename, clean_lines, linenum,
                              nesting_state, error):
  """Check for non-const references.

  Separate from CheckLanguage since it scans backwards from current
  line, instead of scanning forward.

  Args:
    filename: The name of the current file.
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
    nesting_state: A NestingState instance which maintains information about
                   the current stack of nested blocks being parsed.
    error: The function to call with any errors found.
  """
  # Do nothing if there is no '&' on current line.
  line = clean_lines.elided[linenum]
  if '&' not in line:
    return
  # If a function is inherited, current function doesn't have much of
  # a choice, so any non-const references should not be blamed on
  # derived function.
  if IsDerivedFunction(clean_lines, linenum):
    return
  # Long type names may be broken across multiple lines, usually in one
  # of these forms:
  #   LongType
  #       ::LongTypeContinued &identifier
  #   LongType::
  #       LongTypeContinued &identifier
  #   LongType<
  #       ...>::LongTypeContinued &identifier
  #
  # If we detected a type split across two lines, join the previous
  # line to current line so that we can match const references
  # accordingly.
  #
  # Note that this only scans back one line, since scanning back
  # arbitrary number of lines would be expensive. If you have a type
  # that spans more than 2 lines, please use a typedef.
  if linenum > 1:
    previous = None
    if Match(r'\s*::(?:[\w<>]|::)+\s*&\s*\S', line):
      # previous_line\n + ::current_line
      previous = Search(r'\b((?:const\s*)?(?:[\w<>]|::)+[\w<>])\s*$',
                        clean_lines.elided[linenum - 1])
    elif Match(r'\s*[a-zA-Z_]([\w<>]|::)+\s*&\s*\S', line):
      # previous_line::\n + current_line
      previous = Search(r'\b((?:const\s*)?(?:[\w<>]|::)+::)\s*$',
                        clean_lines.elided[linenum - 1])
    if previous:
      line = previous.group(1) + line.lstrip()
    else:
      # Check for templated parameter that is split across multiple lines
      endpos = line.rfind('>')
      if endpos > -1:
        (_, startline, startpos) = ReverseCloseExpression(
            clean_lines, linenum, endpos)
        if startpos > -1 and startline < linenum:
          # Found the matching < on an earlier line, collect all
          # pieces up to current line.
          line = ''
          for i in xrange(startline, linenum + 1):
            line += clean_lines.elided[i].strip()
  # Check for non-const references in function parameters. A single '&' may
  # found in the following places:
  #   inside expression: binary & for bitwise AND
  #   inside expression: unary & for taking the address of something
  #   inside declarators: reference parameter
  # We will exclude the first two cases by checking that we are not inside a
  # function body, including one that was just introduced by a trailing '{'.
  # TODO(unknown): Doesn't account for 'catch(Exception& e)' [rare].
  if (nesting_state.previous_stack_top and
      not (isinstance(nesting_state.previous_stack_top, _ClassInfo) or
           isinstance(nesting_state.previous_stack_top, _NamespaceInfo))):
    # Not at toplevel, not within a class, and not within a namespace
    return
  # Avoid initializer lists. We only need to scan back from the
  # current line for something that starts with ':'.
  #
  # We don't need to check the current line, since the '&' would
  # appear inside the second set of parentheses on the current line as
  # opposed to the first set.
  if linenum > 0:
    for i in xrange(linenum - 1, max(0, linenum - 10), -1):
      previous_line = clean_lines.elided[i]
      if not Search(r'[),]\s*$', previous_line):
        break
      if Match(r'^\s*:\s+\S', previous_line):
        return
  # Avoid preprocessors
  if Search(r'\\\s*$', line):
    return
  # Avoid constructor initializer lists
  if IsInitializerList(clean_lines, linenum):
    return
  # We allow non-const references in a few standard places, like functions
  # called "swap()" or iostream operators like "<<" or ">>". Do not check
  # those function parameters.
  #
  # We also accept & in static_assert, which looks like a function but
  # it's actually a declaration expression.
  whitelisted_functions = (r'(?:[sS]wap(?:<\w:+>)?|'
                           r'operator\s*[<>][<>]|'
                           r'static_assert|COMPILE_ASSERT'
                           r')\s*\(')
  if Search(whitelisted_functions, line):
    return
  elif not Search(r'\S+\([^)]*$', line):
    # Don't see a whitelisted function on this line. Actually we
    # didn't see any function name on this line, so this is likely a
    # multi-line parameter list. Try a bit harder to catch this case.
    for i in xrange(2):
      if (linenum > i and
          Search(whitelisted_functions, clean_lines.elided[linenum - i - 1])):
        return
  # Drop brace-enclosed bodies so _RE_PATTERN_REF_PARAM only sees the
  # declaration, then report every captured reference parameter that does
  # not match the const-reference pattern.
  decls = ReplaceAll(r'{[^}]*}', ' ', line)  # exclude function body
  for parameter in re.findall(_RE_PATTERN_REF_PARAM, decls):
    if not Match(_RE_PATTERN_CONST_REF_PARAM, parameter):
      error(filename, linenum, 'runtime/references', 2,
            'Is this a non-const reference? '
            'If so, make const or use a pointer: ' +
            ReplaceAll(' *<', '<', parameter))
def CheckCasts(filename, clean_lines, linenum, error):
  """Various cast related checks.

  Flags deprecated C-style / conversion-function casts of primitive types
  (suggesting static_cast et al. via CheckCStyleCast) and taking the address
  of a cast expression, which may silently point at a temporary.

  Args:
    filename: The name of the current file.
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
    error: The function to call with any errors found.
  """
  line = clean_lines.elided[linenum]

  # Check to see if they're using an conversion function cast.
  # I just try to capture the most common basic types, though there are more.
  # Parameterless conversion functions, such as bool(), are allowed as they are
  # probably a member operator declaration or default constructor.
  match = Search(
      r'(\bnew\s+|\S<\s*(?:const\s+)?)?\b'
      r'(int|float|double|bool|char|int32|uint32|int64|uint64)'
      r'(\([^)].*)', line)
  expecting_function = ExpectingFunctionArgs(clean_lines, linenum)
  if match and not expecting_function:
    matched_type = match.group(2)

    # matched_new_or_template is used to silence two false positives:
    # - New operators
    # - Template arguments with function types
    #
    # For template arguments, we match on types immediately following
    # an opening bracket without any spaces. This is a fast way to
    # silence the common case where the function type is the first
    # template argument. False negative with less-than comparison is
    # avoided because those operators are usually followed by a space.
    #
    #   function<double(double)> // bracket + no space = false positive
    #   value < double(42)       // bracket + space = true positive
    matched_new_or_template = match.group(1)

    # Avoid arrays by looking for brackets that come after the closing
    # parenthesis.
    if Match(r'\([^()]+\)\s*\[', match.group(3)):
      return

    # Other things to ignore:
    # - Function pointers
    # - Casts to pointer types
    # - Placement new
    # - Alias declarations
    matched_funcptr = match.group(3)
    if (matched_new_or_template is None and
        not (matched_funcptr and
             (Match(r'\((?:[^() ]+::\s*\*\s*)?[^() ]+\)\s*\(',
                    matched_funcptr) or
              matched_funcptr.startswith('(*)'))) and
        not Match(r'\s*using\s+\S+\s*=\s*' + matched_type, line) and
        not Search(r'new\(\S+\)\s*' + matched_type, line)):
      error(filename, linenum, 'readability/casting', 4,
            'Using deprecated casting style. '
            'Use static_cast<%s>(...) instead' %
            matched_type)

  if not expecting_function:
    CheckCStyleCast(filename, clean_lines, linenum, 'static_cast',
                    r'\((int|float|double|bool|char|u?int(16|32|64))\)', error)

  # This doesn't catch all cases. Consider (const char * const)"hello".
  #
  # (char *) "foo" should always be a const_cast (reinterpret_cast won't
  # compile).
  if CheckCStyleCast(filename, clean_lines, linenum, 'const_cast',
                     r'\((char\s?\*+\s?)\)\s*"', error):
    pass
  else:
    # Check pointer casts for other than string constants
    CheckCStyleCast(filename, clean_lines, linenum, 'reinterpret_cast',
                    r'\((\w+\s?\*+\s?)\)', error)

  # In addition, we look for people taking the address of a cast. This
  # is dangerous -- casts can assign to temporaries, so the pointer doesn't
  # point where you think.
  #
  # Some non-identifier character is required before the '&' for the
  # expression to be recognized as a cast. These are casts:
  #   expression = &static_cast<int*>(temporary());
  #   function(&(int*)(temporary()));
  #
  # This is not a cast:
  #   reference_type&(int* function_param);
  match = Search(
      r'(?:[^\w]&\(([^)]+)\)[\w(])|'
      r'(?:[^\w]&(static|dynamic|down|reinterpret)_cast\b)', line)
  if match and match.group(1) != '*':
    # Try a better error message when the & is bound to something
    # dereferenced by the casted pointer, as opposed to the casted
    # pointer itself.
    parenthesis_error = False
    match = Match(r'^(.*&(?:static|dynamic|down|reinterpret)_cast\b)<', line)
    if match:
      # Walk the template argument list, then the following parenthesized
      # expression, then look at what comes after it (possibly on a
      # continuation line) for a dereference (-> or []).
      _, y1, x1 = CloseExpression(clean_lines, linenum, len(match.group(1)))
      if x1 >= 0 and clean_lines.elided[y1][x1] == '(':
        _, y2, x2 = CloseExpression(clean_lines, y1, x1)
        if x2 >= 0:
          extended_line = clean_lines.elided[y2][x2:]
          if y2 < clean_lines.NumLines() - 1:
            extended_line += clean_lines.elided[y2 + 1]
          if Match(r'\s*(?:->|\[)', extended_line):
            parenthesis_error = True

    if parenthesis_error:
      error(filename, linenum, 'readability/casting', 4,
            ('Are you taking an address of something dereferenced '
             'from a cast? Wrapping the dereferenced expression in '
             'parentheses will make the binding more obvious'))
    else:
      error(filename, linenum, 'runtime/casting', 4,
            ('Are you taking an address of a cast? '
             'This is dangerous: could be a temp var. '
             'Take the address before doing the cast, rather than after'))
def CheckCStyleCast(filename, clean_lines, linenum, cast_type, pattern, error):
  """Checks for a C-style cast by looking for the pattern.

  Args:
    filename: The name of the current file.
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
    cast_type: The string for the C++ cast to recommend. This is either
      reinterpret_cast, static_cast, or const_cast, depending.
    pattern: The regular expression used to find C-style casts.
    error: The function to call with any errors found.

  Returns:
    True if an error was emitted.
    False otherwise.
  """
  line = clean_lines.elided[linenum]
  match = Search(pattern, line)
  if not match:
    return False

  # Exclude lines with keywords that tend to look like casts
  context = line[0:match.start(1) - 1]
  if Match(r'.*\b(?:sizeof|alignof|alignas|[_A-Z][_A-Z0-9]*)\s*$', context):
    return False

  # Try expanding current context to see if we one level of
  # parentheses inside a macro.
  if linenum > 0:
    for i in xrange(linenum - 1, max(0, linenum - 5), -1):
      context = clean_lines.elided[i] + context
  if Match(r'.*\b[_A-Z][_A-Z0-9]*\s*\((?:\([^()]*\)|[^()])*$', context):
    return False

  # operator++(int) and operator--(int)
  if context.endswith(' operator++') or context.endswith(' operator--'):
    return False

  # A single unnamed argument for a function tends to look like old
  # style cast. If we see those, don't issue warnings for deprecated
  # casts, instead issue warnings for unnamed arguments where
  # appropriate.
  #
  # These are things that we want warnings for, since the style guide
  # explicitly require all parameters to be named:
  #   Function(int);
  #   Function(int) {
  #   ConstMember(int) const;
  #   ConstMember(int) const {
  #   ExceptionMember(int) throw (...);
  #   ExceptionMember(int) throw (...) {
  #   PureVirtual(int) = 0;
  #
  # These are functions of some sort, where the compiler would be fine
  # if they had named parameters, but people often omit those
  # identifiers to reduce clutter:
  #   (FunctionPointer)(int);
  #   (FunctionPointer)(int) = value;
  #   Function((function_pointer_arg)(int))
  #   Function((function_pointer_arg)(int), int param)
  #   <TemplateArgument(int)>;
  #   <(FunctionPointerTemplateArgument)(int)>;
  remainder = line[match.end(0):]
  if Match(r'^\s*(?:;|const\b|throw\b|final\b|override\b|[=>{),])',
           remainder):
    # Looks like an unnamed parameter.

    # Don't warn on any kind of template arguments.
    if Match(r'^\s*>', remainder):
      return False

    # Don't warn on assignments to function pointers, but keep warnings for
    # unnamed parameters to pure virtual functions. Note that this pattern
    # will also pass on assignments of "0" to function pointers, but the
    # preferred values for those would be "nullptr" or "NULL".
    # NOTE(review): r'^\s=' matches exactly one whitespace char before '=';
    # this looks like it was meant to be r'^\s*=' — confirm against upstream.
    matched_zero = Match(r'^\s=\s*(\S+)\s*;', remainder)
    if matched_zero and matched_zero.group(1) != '0':
      return False

    # Don't warn on function pointer declarations. For this we need
    # to check what came before the "(type)" string.
    if Match(r'.*\)\s*$', line[0:match.start(0)]):
      return False

    # Don't warn if the parameter is named with block comments, e.g.:
    #  Function(int /*unused_param*/);
    raw_line = clean_lines.raw_lines[linenum]
    if '/*' in raw_line:
      return False

    # Passed all filters, issue warning here.
    error(filename, linenum, 'readability/function', 3,
          'All parameters should be named in a function')
    return True

  # At this point, all that should be left is actual casts.
  error(filename, linenum, 'readability/casting', 4,
        'Using C-style cast. Use %s<%s>(...) instead' %
        (cast_type, match.group(1)))

  return True
def ExpectingFunctionArgs(clean_lines, linenum):
  """Checks whether where function type arguments are expected.

  Args:
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.

  Returns:
    True if the line at 'linenum' is inside something that expects arguments
    of function types.
  """
  current_line = clean_lines.elided[linenum]
  # A MOCK_METHOD macro opening on this very line expects a function type.
  mock_on_this_line = Match(r'^\s*MOCK_(CONST_)?METHOD\d+(_T)?\(',
                            current_line)
  if mock_on_this_line:
    return mock_on_this_line
  if linenum < 2:
    return False
  previous_line = clean_lines.elided[linenum - 1]
  line_before_that = clean_lines.elided[linenum - 2]
  # Multi-line MOCK_METHOD invocations, or a std::function / std::mfunction
  # template argument list opened on the previous line.
  return (Match(r'^\s*MOCK_(?:CONST_)?METHOD\d+(?:_T)?\((?:\S+,)?\s*$',
                previous_line) or
          Match(r'^\s*MOCK_(?:CONST_)?METHOD\d+(?:_T)?\(\s*$',
                line_before_that) or
          Search(r'\bstd::m?function\s*\<\s*$', previous_line))
_HEADERS_CONTAINING_TEMPLATES = (
('<deque>', ('deque',)),
('<functional>', ('unary_function', 'binary_function',
'plus', 'minus', 'multiplies', 'divides', 'modulus',
'negate',
'equal_to', 'not_equal_to', 'greater', 'less',
'greater_equal', 'less_equal',
'logical_and', 'logical_or', 'logical_not',
'unary_negate', 'not1', 'binary_negate', 'not2',
'bind1st', 'bind2nd',
'pointer_to_unary_function',
'pointer_to_binary_function',
'ptr_fun',
'mem_fun_t', 'mem_fun', 'mem_fun1_t', 'mem_fun1_ref_t',
'mem_fun_ref_t',
'const_mem_fun_t', 'const_mem_fun1_t',
'const_mem_fun_ref_t', 'const_mem_fun1_ref_t',
'mem_fun_ref',
)),
('<limits>', ('numeric_limits',)),
('<list>', ('list',)),
('<map>', ('map', 'multimap',)),
('<memory>', ('allocator',)),
('<queue>', ('queue', 'priority_queue',)),
('<set>', ('set', 'multiset',)),
('<stack>', ('stack',)),
('<string>', ('char_traits', 'basic_string',)),
('<utility>', ('pair',)),
('<vector>', ('vector',)),
# gcc extensions.
# Note: std::hash is their hash, ::hash is our hash
('<hash_map>', ('hash_map', 'hash_multimap',)),
('<hash_set>', ('hash_set', 'hash_multiset',)),
('<slist>', ('slist',)),
)
_RE_PATTERN_STRING = re.compile(r'\bstring\b')
_re_pattern_algorithm_header = []
for _template in ('copy', 'max', 'min', 'min_element', 'sort', 'swap',
'transform'):
# Match max<type>(..., ...), max(..., ...), but not foo->max, foo.max or
# type::max().
_re_pattern_algorithm_header.append(
(re.compile(r'[^>.]\b' + _template + r'(<.*?>)?\([^\)]'),
_template,
'<algorithm>'))
_re_pattern_templates = []
for _header, _templates in _HEADERS_CONTAINING_TEMPLATES:
for _template in _templates:
_re_pattern_templates.append(
(re.compile(r'(\<|\b)' + _template + r'\s*\<'),
_template + '<>',
_header))
def FilesBelongToSameModule(filename_cc, filename_h):
  """Check if these two filenames belong to the same module.

  The concept of a 'module' here is a as follows:
  foo.h, foo-inl.h, foo.cc, foo_test.cc and foo_unittest.cc belong to the
  same 'module' if they are in the same directory.
  some/path/public/xyzzy and some/path/internal/xyzzy are also considered
  to belong to the same module here.

  If the filename_cc contains a longer path than the filename_h, for example,
  '/absolute/path/to/base/sysinfo.cc', and this file would include
  'base/sysinfo.h', this function also produces the prefix needed to open the
  header. This is used by the caller of this function to more robustly open the
  header file. We don't have access to the real include paths in this context,
  so we need this guesswork here.

  Known bugs: tools/base/bar.cc and base/bar.h belong to the same module
  according to this implementation. Because of this, this function gives
  some false positives. This should be sufficiently rare in practice.

  Args:
    filename_cc: is the path for the .cc file
    filename_h: is the path for the header path

  Returns:
    Tuple with a bool and a string:
    bool: True if filename_cc and filename_h belong to the same module.
    string: the additional prefix needed to open the header file.
  """
  if not filename_cc.endswith('.cc'):
    return (False, '')
  stem_cc = filename_cc[:-len('.cc')]
  # Strip test suffixes so foo_test.cc matches foo.h.
  for test_suffix in ('_unittest', '_test'):
    if stem_cc.endswith(test_suffix):
      stem_cc = stem_cc[:-len(test_suffix)]
      break

  if not filename_h.endswith('.h'):
    return (False, '')
  stem_h = filename_h[:-len('.h')]
  if stem_h.endswith('-inl'):
    stem_h = stem_h[:-len('-inl')]

  # public/ and internal/ directories are treated as transparent.
  for layout_dir in ('/public/', '/internal/'):
    stem_cc = stem_cc.replace(layout_dir, '/')
    stem_h = stem_h.replace(layout_dir, '/')

  if not stem_cc.endswith(stem_h):
    return (False, '')
  # Whatever precedes the header stem in the .cc path is the prefix the
  # caller must prepend to open the header.
  return (True, stem_cc[:-len(stem_h)])
def UpdateIncludeState(filename, include_dict, io=codecs):
  """Fill up the include_dict with new includes found from the file.

  Fix: the previous implementation never closed the file handle it opened,
  leaking one handle per header scanned; the handle is now closed in a
  finally block even if iteration raises.

  Args:
    filename: the name of the header to read.
    include_dict: a dictionary in which the headers are inserted.
    io: The io factory to use to read the file. Provided for testability.

  Returns:
    True if a header was successfully added. False otherwise.
  """
  try:
    headerfile = io.open(filename, 'r', 'utf8', 'replace')
  except IOError:
    return False
  try:
    for linenum, line in enumerate(headerfile, 1):
      clean_line = CleanseComments(line)
      match = _RE_PATTERN_INCLUDE.search(clean_line)
      if match:
        # setdefault keeps the line of the first occurrence of each header.
        include_dict.setdefault(match.group(2), linenum)
  finally:
    headerfile.close()
  return True
def CheckForIncludeWhatYouUse(filename, clean_lines, include_state, error,
                              io=codecs):
  """Reports for missing stl includes.

  This function will output warnings to make sure you are including the headers
  necessary for the stl containers and functions that you use. We only give one
  reason to include a header. For example, if you use both equal_to<> and
  less<> in a .h file, only one (the latter in the file) of these will be
  reported as a reason to include the <functional>.

  Args:
    filename: The name of the current file.
    clean_lines: A CleansedLines instance containing the file.
    include_state: An _IncludeState instance.
    error: The function to call with any errors found.
    io: The IO factory to use to read the header file. Provided for unittest
        injection.
  """
  required = {}  # A map of header name to linenumber and the template entity.
                 # Example of required: { '<functional>': (1219, 'less<>') }

  for linenum in xrange(clean_lines.NumLines()):
    line = clean_lines.elided[linenum]
    if not line or line[0] == '#':
      continue

    # String is special -- it is a non-templatized type in STL.
    matched = _RE_PATTERN_STRING.search(line)
    if matched:
      # Don't warn about strings in non-STL namespaces:
      # (We check only the first match per line; good enough.)
      prefix = line[:matched.start()]
      if prefix.endswith('std::') or not prefix.endswith('::'):
        required['<string>'] = (linenum, 'string')

    for pattern, template, header in _re_pattern_algorithm_header:
      if pattern.search(line):
        required[header] = (linenum, template)

    # The following function is just a speed up, no semantics are changed.
    if not '<' in line:  # Reduces the cpu time usage by skipping lines.
      continue

    for pattern, template, header in _re_pattern_templates:
      if pattern.search(line):
        required[header] = (linenum, template)

  # The policy is that if you #include something in foo.h you don't need to
  # include it again in foo.cc. Here, we will look at possible includes.
  # Let's flatten the include_state include_list and copy it into a dictionary.
  include_dict = dict([item for sublist in include_state.include_list
                       for item in sublist])

  # Did we find the header for this file (if any) and successfully load it?
  header_found = False

  # Use the absolute path so that matching works properly.
  abs_filename = FileInfo(filename).FullName()

  # For Emacs's flymake.
  # If cpplint is invoked from Emacs's flymake, a temporary file is generated
  # by flymake and that file name might end with '_flymake.cc'. In that case,
  # restore original file name here so that the corresponding header file can be
  # found.
  # e.g. If the file name is 'foo_flymake.cc', we should search for 'foo.h'
  # instead of 'foo_flymake.h'
  abs_filename = re.sub(r'_flymake\.cc$', '.cc', abs_filename)

  # include_dict is modified during iteration, so we iterate over a copy of
  # the keys.
  # NOTE(review): this relies on Python 2 dict.keys() returning a list copy;
  # under Python 3 a view object would break during mutation — confirm if
  # this file is ever ported.
  header_keys = include_dict.keys()
  for header in header_keys:
    (same_module, common_path) = FilesBelongToSameModule(abs_filename, header)
    fullpath = common_path + header
    if same_module and UpdateIncludeState(fullpath, include_dict, io):
      header_found = True

  # If we can't find the header file for a .cc, assume it's because we don't
  # know where to look. In that case we'll give up as we're not sure they
  # didn't include it in the .h file.
  # TODO(unknown): Do a better job of finding .h files so we are confident that
  # not having the .h file means there isn't one.
  if filename.endswith('.cc') and not header_found:
    return

  # All the lines have been processed, report the errors found.
  for required_header_unstripped in required:
    template = required[required_header_unstripped][1]
    if required_header_unstripped.strip('<>"') not in include_dict:
      error(filename, required[required_header_unstripped][0],
            'build/include_what_you_use', 4,
            'Add #include ' + required_header_unstripped + ' for ' + template)
# Matches make_pair followed by an explicit template argument list, e.g.
# "make_pair<int, int>(...)"; used by CheckMakePairUsesDeduction.
_RE_PATTERN_EXPLICIT_MAKEPAIR = re.compile(r'\bmake_pair\s*<')
def CheckMakePairUsesDeduction(filename, clean_lines, linenum, error):
  """Check that make_pair's template arguments are deduced.

  G++ 4.6 in C++11 mode fails badly if make_pair's template arguments are
  specified explicitly, and such use isn't intended in any case.

  Args:
    filename: The name of the current file.
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
    error: The function to call with any errors found.
  """
  elided_line = clean_lines.elided[linenum]
  if not _RE_PATTERN_EXPLICIT_MAKEPAIR.search(elided_line):
    return
  error(filename, linenum, 'build/explicit_make_pair',
        4,  # 4 = high confidence
        'For C++11-compatibility, omit template arguments from make_pair'
        ' OR use pair directly OR if appropriate, construct a pair directly')
def CheckDefaultLambdaCaptures(filename, clean_lines, linenum, error):
  """Check that default lambda captures are not used.

  Args:
    filename: The name of the current file.
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
    error: The function to call with any errors found.
  """
  elided_line = clean_lines.elided[linenum]
  # "[=" always introduces a default capture; "[&" does so only when the
  # '&' is _not_ immediately followed by an identifier (which would be a
  # by-reference capture of that one name).
  introducer = Match(r'^(.*)\[\s*(?:=|&[^\w])', elided_line)
  if not introducer:
    return
  # Found a potential error; examine what follows the lambda-introducer.
  # Unless it is an open parenthesis (lambda-declarator) or an open brace
  # (compound-statement), this is not actually a lambda.
  closed, _, pos = CloseExpression(clean_lines, linenum,
                                   len(introducer.group(1)))
  if pos < 0 or not Match(r'^\s*[{(]', closed[pos:]):
    return
  error(filename, linenum, 'build/c++11',
        4,  # 4 = high confidence
        'Default lambda captures are an unapproved C++ feature.')
def CheckRedundantVirtual(filename, clean_lines, linenum, error):
  """Check if line contains a redundant "virtual" function-specifier.

  A declaration marked "override" or "final" is already implicitly virtual,
  so an explicit "virtual" keyword on the same declaration is redundant.

  Args:
    filename: The name of the current file.
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
    error: The function to call with any errors found.
  """
  # Look for "virtual" on current line.
  line = clean_lines.elided[linenum]
  virtual = Match(r'^(.*\bvirtual\b)', line)
  if not virtual: return

  # Look for the next opening parenthesis. This is the start of the
  # parameter list (possibly on the next line shortly after virtual).
  # TODO(unknown): doesn't work if there are virtual functions with
  # decltype() or other things that use parentheses, but csearch suggests
  # that this is rare.
  end_col = -1
  end_line = -1
  start_col = len(virtual.group(1))
  # Scan at most 3 lines starting from the "virtual" keyword; after the
  # first line the search restarts at column 0.
  for start_line in xrange(linenum, min(linenum + 3, clean_lines.NumLines())):
    line = clean_lines.elided[start_line][start_col:]
    parameter_list = Match(r'^([^(]*)\(', line)
    if parameter_list:
      # Match parentheses to find the end of the parameter list
      (_, end_line, end_col) = CloseExpression(
          clean_lines, start_line, start_col + len(parameter_list.group(1)))
      break
    start_col = 0

  if end_col < 0:
    return  # Couldn't find end of parameter list, give up

  # Look for "override" or "final" after the parameter list
  # (possibly on the next few lines).
  for i in xrange(end_line, min(end_line + 3, clean_lines.NumLines())):
    line = clean_lines.elided[i][end_col:]
    match = Search(r'\b(override|final)\b', line)
    if match:
      error(filename, linenum, 'readability/inheritance', 4,
            ('"virtual" is redundant since function is '
             'already declared as "%s"' % match.group(1)))

    # Set end_col to check whole lines after we are done with the
    # first line.
    end_col = 0
    # A non-identifier character at end of line means the declaration is
    # over; stop scanning continuation lines.
    if Search(r'[^\w]\s*$', line):
      break
def CheckRedundantOverrideOrFinal(filename, clean_lines, linenum, error):
  """Check if line contains a redundant "override" or "final" virt-specifier.

  Args:
    filename: The name of the current file.
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
    error: The function to call with any errors found.
  """
  # "final" already implies the function overrides, so writing both
  # specifiers on one declaration is redundant.
  elided_line = clean_lines.elided[linenum]
  if Search(r'\boverride\b', elided_line):
    if Search(r'\bfinal\b', elided_line):
      error(filename, linenum, 'readability/inheritance', 4,
            ('"override" is redundant since function is '
             'already declared as "final"'))
# Returns true if we are at a new block, and it is directly
# inside of a namespace.
def IsBlockInNameSpace(nesting_state, is_forward_declaration):
  """Checks that the new block is directly in a namespace.

  Args:
    nesting_state: The _NestingState object that contains info about our state.
    is_forward_declaration: If the class is a forward declared class.
  Returns:
    Whether or not the new block is directly in a namespace.
  """
  stack = nesting_state.stack
  if is_forward_declaration:
    # A forward declaration only needs the innermost scope to be a namespace.
    return len(stack) >= 1 and isinstance(stack[-1], _NamespaceInfo)

  # A real block needs its immediate parent scope to be a namespace, and the
  # block itself must opt into the namespace-indentation check.
  return (len(stack) > 1 and
          stack[-1].check_namespace_indentation and
          isinstance(stack[-2], _NamespaceInfo))
def ShouldCheckNamespaceIndentation(nesting_state, is_namespace_indent_item,
                                    raw_lines_no_comments, linenum):
  """This method determines if we should apply our namespace indentation check.

  Args:
    nesting_state: The current nesting state.
    is_namespace_indent_item: If we just put a new class on the stack, True.
      If the top of the stack is not a class, or we did not recently
      add the class, False.
    raw_lines_no_comments: The lines without the comments.
    linenum: The current line number we are processing.

  Returns:
    True if we should apply our namespace indentation check. Currently, it
    only works for classes and namespaces inside of a namespace.
  """
  forward_declared = IsForwardClassDeclaration(raw_lines_no_comments, linenum)
  # Only recently-pushed classes and forward declarations are candidates.
  if not is_namespace_indent_item and not forward_declared:
    return False

  # Indentation inside macro definitions is intentional; skip the check.
  if IsMacroDefinition(raw_lines_no_comments, linenum):
    return False

  return IsBlockInNameSpace(nesting_state, forward_declared)
# Call this method if the line is directly inside of a namespace.
# If the line above is blank (excluding comments) or the start of
# an inner namespace, it cannot be indented.
def CheckItemIndentationInNamespace(filename, raw_lines_no_comments, linenum,
                                    error):
  """Flags a line that is indented while sitting directly in a namespace."""
  if not Match(r'^\s+', raw_lines_no_comments[linenum]):
    return
  error(filename, linenum, 'runtime/indentation_namespace', 4,
        'Do not indent within a namespace')
def ProcessLine(filename, file_extension, clean_lines, line,
                include_state, function_state, nesting_state, error,
                extra_check_functions=[]):
  """Processes a single line in the file.

  Runs every per-line check in sequence; the NOLINT parse and nesting-state
  update must come first so later checks see correct suppression and scope
  information.

  NOTE(review): the mutable default for extra_check_functions is safe only
  because this function never mutates it; callers should still pass their
  own list.

  Args:
    filename: Filename of the file that is being processed.
    file_extension: The extension (dot not included) of the file.
    clean_lines: An array of strings, each representing a line of the file,
                 with comments stripped.
    line: Number of line being processed.
    include_state: An _IncludeState instance in which the headers are inserted.
    function_state: A _FunctionState instance which counts function lines, etc.
    nesting_state: A NestingState instance which maintains information about
                   the current stack of nested blocks being parsed.
    error: A callable to which errors are reported, which takes 4 arguments:
           filename, line number, error level, and message
    extra_check_functions: An array of additional check functions that will be
                           run on each source line. Each function takes 4
                           arguments: filename, clean_lines, line, error
  """
  raw_lines = clean_lines.raw_lines
  # NOLINT comments are parsed from the raw (uncleansed) line so that
  # suppressions inside comments are seen.
  ParseNolintSuppressions(filename, raw_lines[line], line, error)
  nesting_state.Update(filename, clean_lines, line, error)
  CheckForNamespaceIndentation(filename, nesting_state, clean_lines, line,
                               error)
  CheckForMongoPolyfill(filename, clean_lines, line, error)
  CheckForMongoAtomic(filename, clean_lines, line, error)
  # Inside an asm block, only the checks above apply.
  if nesting_state.InAsmBlock(): return
  CheckForFunctionLengths(filename, clean_lines, line, function_state, error)
  CheckForMultilineCommentsAndStrings(filename, clean_lines, line, error)
  CheckStyle(filename, clean_lines, line, file_extension, nesting_state, error)
  CheckLanguage(filename, clean_lines, line, file_extension, include_state,
                nesting_state, error)
  CheckForNonConstReference(filename, clean_lines, line, nesting_state, error)
  CheckForNonStandardConstructs(filename, clean_lines, line,
                                nesting_state, error)
  CheckVlogArguments(filename, clean_lines, line, error)
  CheckPosixThreading(filename, clean_lines, line, error)
  CheckInvalidIncrement(filename, clean_lines, line, error)
  CheckMakePairUsesDeduction(filename, clean_lines, line, error)
  CheckDefaultLambdaCaptures(filename, clean_lines, line, error)
  CheckRedundantVirtual(filename, clean_lines, line, error)
  CheckRedundantOverrideOrFinal(filename, clean_lines, line, error)
  for check_fn in extra_check_functions:
    check_fn(filename, clean_lines, line, error)
def FlagCxx11Features(filename, clean_lines, linenum, error):
  """Flag those c++11 features that we only allow in certain places.

  Args:
    filename: The name of the current file.
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
    error: The function to call with any errors found.
  """
  line = clean_lines.elided[linenum]

  # Flag unapproved C++11 headers.
  include = Match(r'\s*#\s*include\s+[<"]([^<"]+)[">]', line)
  if include and include.group(1) in ('cfenv',
                                      'condition_variable',
                                      'fenv.h',
                                      'future',
                                      'mutex',
                                      'thread',
                                      'chrono',
                                      'ratio',
                                      'regex',
                                      'system_error',
                                     ):
    error(filename, linenum, 'build/c++11', 5,
          ('<%s> is an unapproved C++11 header.') % include.group(1))

  # The only place where we need to worry about C++11 keywords and library
  # features in preprocessor directives is in macro definitions.
  if Match(r'\s*#', line) and not Match(r'\s*#\s*define\b', line): return

  # These are classes and free functions. The classes are always
  # mentioned as std::*, but we only catch the free functions if
  # they're not found by ADL. They're alphabetical by header.
  for top_name in (
      # type_traits
      'alignment_of',
      'aligned_union',

      # utility
      'forward',
      ):
    if Search(r'\bstd::%s\b' % top_name, line):
      error(filename, linenum, 'build/c++11', 5,
            ('std::%s is an unapproved C++11 class or function. Send c-style '
             'an example of where it would make your code more readable, and '
             'they may let you use it.') % top_name)
def ProcessFileData(filename, file_extension, lines, error,
                    extra_check_functions=[]):
  """Performs lint checks and reports any errors to the given error function.

  NOTE(review): the mutable default for extra_check_functions is safe only
  because it is only passed through, never mutated.

  Args:
    filename: Filename of the file that is being processed.
    file_extension: The extension (dot not included) of the file.
    lines: An array of strings, each representing a line of the file, with the
           last element being empty if the file is terminated with a newline.
    error: A callable to which errors are reported, which takes 4 arguments:
           filename, line number, error level, and message
    extra_check_functions: An array of additional check functions that will be
                           run on each source line. Each function takes 4
                           arguments: filename, clean_lines, line, error
  """
  # Sentinel lines make user line numbers 1-based and give the checkers a
  # known first/last line to look at.
  lines = (['// marker so line numbers and indices both start at 1'] + lines +
           ['// marker so line numbers end in a known way'])

  include_state = _IncludeState()
  function_state = _FunctionState()
  nesting_state = NestingState()

  # Suppressions are per-file; clear any left over from the previous file.
  ResetNolintSuppressions()

  CheckForCopyright(filename, lines, error)

  if file_extension == 'h':
    CheckForHeaderGuard(filename, lines, error)

  RemoveMultiLineComments(filename, lines, error)
  clean_lines = CleansedLines(lines)
  for line in xrange(clean_lines.NumLines()):
    ProcessLine(filename, file_extension, clean_lines, line,
                include_state, function_state, nesting_state, error,
                extra_check_functions)
    FlagCxx11Features(filename, clean_lines, line, error)
  nesting_state.CheckCompletedBlocks(filename, error)

  CheckForIncludeWhatYouUse(filename, clean_lines, include_state, error)

  # We check here rather than inside ProcessLine so that we see raw
  # lines rather than "cleaned" lines.
  CheckForBadCharacters(filename, lines, error)

  CheckForNewlineAtEOF(filename, lines, error)
def ProcessConfigOverrides(filename):
  """ Loads the configuration files and processes the config overrides.

  Walks from the directory containing |filename| up toward the filesystem
  root, applying each CPPLINT.cfg found along the way; a file containing
  "set noparent" stops the upward search.

  Fix: the filter loop variable was named 'filter', shadowing the Python
  builtin; renamed to cfg_filter.

  Args:
    filename: The name of the file being processed by the linter.

  Returns:
    False if the current |filename| should not be processed further.
  """
  abs_filename = os.path.abspath(filename)
  cfg_filters = []
  keep_looking = True
  while keep_looking:
    abs_path, base_name = os.path.split(abs_filename)
    if not base_name:
      break  # Reached the root directory.

    cfg_file = os.path.join(abs_path, "CPPLINT.cfg")
    abs_filename = abs_path
    if not os.path.isfile(cfg_file):
      continue

    try:
      with open(cfg_file) as file_handle:
        for line in file_handle:
          line, _, _ = line.partition('#')  # Remove comments.
          if not line.strip():
            continue

          name, _, val = line.partition('=')
          name = name.strip()
          val = val.strip()
          if name == 'set noparent':
            keep_looking = False
          elif name == 'filter':
            cfg_filters.append(val)
          elif name == 'exclude_files':
            # When matching exclude_files pattern, use the base_name of
            # the current file name or the directory name we are processing.
            # For example, if we are checking for lint errors in /foo/bar/baz.cc
            # and we found the .cfg file at /foo/CPPLINT.cfg, then the config
            # file's "exclude_files" filter is meant to be checked against "bar"
            # and not "baz" nor "bar/baz.cc".
            if base_name:
              pattern = re.compile(val)
              if pattern.match(base_name):
                sys.stderr.write('Ignoring "%s": file excluded by "%s". '
                                 'File path component "%s" matches '
                                 'pattern "%s"\n' %
                                 (filename, cfg_file, base_name, val))
                return False
          elif name == 'linelength':
            global _line_length
            try:
              _line_length = int(val)
            except ValueError:
              sys.stderr.write('Line length must be numeric.')
          else:
            sys.stderr.write(
                'Invalid configuration option (%s) in file %s\n' %
                (name, cfg_file))

    except IOError:
      sys.stderr.write(
          "Skipping config file '%s': Can't open for reading\n" % cfg_file)
      keep_looking = False

  # Apply all the accumulated filters in reverse order (top-level directory
  # config options having the least priority).
  for cfg_filter in reversed(cfg_filters):
    _AddFilters(cfg_filter)

  return True
def ProcessFile(filename, vlevel, extra_check_functions=None):
  """Does google-lint on a single file.

  Args:
    filename: The name of the file to parse, or '-' to read from stdin.

    vlevel: The level of errors to report.  Every error of confidence
            >= verbose_level will be reported.  0 is a good default.

    extra_check_functions: An optional list of additional check functions
                           that will be run on each source line. Each
                           function takes 4 arguments:
                           filename, clean_lines, line, error
  """
  # Fix: avoid a mutable default argument ([]); a shared list default would
  # persist (and could be mutated) across calls.
  if extra_check_functions is None:
    extra_check_functions = []

  _SetVerboseLevel(vlevel)
  _BackupFilters()

  # Per-directory CPPLINT.cfg files may exclude this file entirely.
  if not ProcessConfigOverrides(filename):
    _RestoreFilters()
    return

  lf_lines = []    # 1-based numbers of lines that end with a bare LF
  crlf_lines = []  # 1-based numbers of lines that end with CRLF

  try:
    # Support the UNIX convention of using "-" for stdin.  Note that
    # we are not opening the file with universal newline support
    # (which codecs doesn't support anyway), so the resulting lines do
    # contain trailing '\r' characters if we are reading a file that
    # has CRLF endings.
    # If after the split a trailing '\r' is present, it is removed
    # below.
    if filename == '-':
      lines = codecs.StreamReaderWriter(sys.stdin,
                                        codecs.getreader('utf8'),
                                        codecs.getwriter('utf8'),
                                        'replace').read().split('\n')
    else:
      lines = codecs.open(filename, 'r', 'utf8', 'replace').read().split('\n')

    # Remove trailing '\r' while recording which line-ending style each
    # line used, so mixed endings can be reported later.
    # The -1 accounts for the extra trailing blank line we get from split()
    for linenum in range(len(lines) - 1):
      if lines[linenum].endswith('\r'):
        lines[linenum] = lines[linenum].rstrip('\r')
        crlf_lines.append(linenum + 1)
      else:
        lf_lines.append(linenum + 1)

  except IOError:
    sys.stderr.write(
        "Skipping input '%s': Can't open for reading\n" % filename)
    _RestoreFilters()
    return

  # Note, if no dot is found, this will give the entire filename as the ext.
  file_extension = filename[filename.rfind('.') + 1:]

  # When reading from stdin, the extension is unknown, so no cpplint tests
  # should rely on the extension.
  if filename != '-' and file_extension not in _valid_extensions:
    sys.stderr.write('Ignoring %s; not a valid file name '
                     '(%s)\n' % (filename, ', '.join(_valid_extensions)))
  else:
    ProcessFileData(filename, file_extension, lines, Error,
                    extra_check_functions)

    # If end-of-line sequences are a mix of LF and CR-LF, issue
    # warnings on the lines with CR.
    #
    # Don't issue any warnings if all lines are uniformly LF or CR-LF,
    # since critique can handle these just fine, and the style guide
    # doesn't dictate a particular end of line sequence.
    #
    # We can't depend on os.linesep to determine what the desired
    # end-of-line sequence should be, since that will return the
    # server-side end-of-line sequence.
    if lf_lines and crlf_lines:
      # Warn on every line with CR.  An alternative approach might be to
      # check whether the file is mostly CRLF or just LF, and warn on the
      # minority, we bias toward LF here since most tools prefer LF.
      for linenum in crlf_lines:
        Error(filename, linenum, 'whitespace/newline', 1,
              'Unexpected \\r (^M) found; better to use only \\n')

  _RestoreFilters()
def PrintUsage(message):
  """Print the usage text to stderr and exit.

  Args:
    message: An optional error message; when given, the process exits with
             a fatal-error status line, otherwise with exit code 1.
  """
  sys.stderr.write(_USAGE)
  if not message:
    sys.exit(1)
  sys.exit('\nFATAL ERROR: ' + message)
def PrintCategories():
  """Print every error category usable with --filter, then exit.

  These are the categories used to filter messages via --filter.
  """
  for cat in _ERROR_CATEGORIES:
    sys.stderr.write(' %s\n' % cat)
  sys.exit(0)
def ParseArguments(args):
  """Parses the command line arguments.

  This may set the output format and verbosity level as side-effects, and
  may exit the process (via PrintUsage / PrintCategories) on bad input.

  Args:
    args: The command line arguments.

  Returns:
    The list of filenames to lint.
  """
  try:
    (opts, filenames) = getopt.getopt(args, '', ['help', 'output=', 'verbose=',
                                                 'counting=',
                                                 'filter=',
                                                 'root=',
                                                 'linelength=',
                                                 'extensions='])
  except getopt.GetoptError:
    # PrintUsage never returns (it calls sys.exit), so opts/filenames are
    # always bound below.
    PrintUsage('Invalid arguments.')

  verbosity = _VerboseLevel()
  output_format = _OutputFormat()
  filters = ''
  counting_style = ''

  for (opt, val) in opts:
    if opt == '--help':
      PrintUsage(None)
    elif opt == '--output':
      if val not in ('emacs', 'vs7', 'eclipse'):
        PrintUsage('The only allowed output formats are emacs, vs7 and eclipse.')
      output_format = val
    elif opt == '--verbose':
      verbosity = int(val)
    elif opt == '--filter':
      filters = val
      # An empty --filter lists the available categories and exits.
      if not filters:
        PrintCategories()
    elif opt == '--counting':
      if val not in ('total', 'toplevel', 'detailed'):
        PrintUsage('Valid counting options are total, toplevel, and detailed')
      counting_style = val
    elif opt == '--root':
      global _root
      _root = val
    elif opt == '--linelength':
      global _line_length
      try:
        _line_length = int(val)
      except ValueError:
        PrintUsage('Line length must be digits.')
    elif opt == '--extensions':
      global _valid_extensions
      try:
        _valid_extensions = set(val.split(','))
      except ValueError:
        # NOTE: str.split/set never raise ValueError, so this branch is
        # effectively dead; kept for defensive parity with upstream.
        # Fix: "seperated" -> "separated" in the user-facing message.
        PrintUsage('Extensions must be comma separated list.')

  if not filenames:
    PrintUsage('No files were specified.')

  _SetOutputFormat(output_format)
  _SetVerboseLevel(verbosity)
  _SetFilters(filters)
  _SetCountingStyle(counting_style)

  return filenames
def main():
  """Lint every file named on the command line; exit non-zero on errors."""
  targets = ParseArguments(sys.argv[1:])

  # Wrap stderr so non-ASCII output is replaced instead of raising, which
  # would otherwise abort the run when printing unusual file contents.
  sys.stderr = codecs.StreamReaderWriter(
      sys.stderr,
      codecs.getreader('utf8'),
      codecs.getwriter('utf8'),
      'replace')

  _cpplint_state.ResetErrorCounts()
  for target in targets:
    ProcessFile(target, _cpplint_state.verbose_level)
  _cpplint_state.PrintErrorCounts()

  sys.exit(_cpplint_state.error_count > 0)
# Standard script entry point: run the linter when executed directly.
if __name__ == '__main__':
  main()
| 38.180338 | 97 | 0.651681 |
acf0a0ed0aeee41e7bd9e92fbbc7cd280b83f0fd | 1,169 | py | Python | examples/lib_and_noinst_test_programs/examples/fortran/__init__.py | tjgiese/atizer | b8cdb8f4bac7cedfb566d766acee5fe0cc7a7bd3 | [
"MIT"
] | null | null | null | examples/lib_and_noinst_test_programs/examples/fortran/__init__.py | tjgiese/atizer | b8cdb8f4bac7cedfb566d766acee5fe0cc7a7bd3 | [
"MIT"
] | null | null | null | examples/lib_and_noinst_test_programs/examples/fortran/__init__.py | tjgiese/atizer | b8cdb8f4bac7cedfb566d766acee5fe0cc7a7bd3 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
from atizer import *
from atizer.dbase import *
import sys
sys.path.insert(0,here() + "/../..")
from targets import *
del sys.path[0]
class fortran_example(autoprog):
    """Build-target definition for the ``fortran_example`` program.

    Configures an autoprog target that links against tdbsc and is built
    but not installed.
    """
    def __init__( self, srcdir=None ):
        # srcdir: source directory of the target; None lets the base class
        # pick the default location.
        super( fortran_example, self ).__init__( "fortran_example", srcdir )
        self.copyright_holder = "Timothy J. Giese"
        self.license = licenses.MIT
        ## @brief List of autolib objects representing library dependencies
        self.libs = [ tdbsc() ]
        ## @brief List of filenames to be distributed, but not installed
        self.dist_noinst_SCRIPTS = []
        # Extra data files shipped with the distribution.
        self.EXTRA_DIST = [ "ade.xyz", "ade.2dbspl" ]
        ## @brief If True, then compile the target without installing it
        # (default False)
        self.noinst = True
        ## @brief If True, "make doxygen-doc" create documentation html
        self.doxygen = False
        # Optional feature toggles, left disabled:
        # self.enable_openmp()
        # self.enable_mpi()
package = autopackage(
"fortran_example",
targets=[ fortran_example( here() ) ],
subdirs=[],
version="0.1",
apiversion="0:0:0")
if __name__ == "__main__":
package.configure()
| 26.568182 | 76 | 0.626176 |
acf0a28fdd80acd6034f84274222553577c12aa9 | 1,958 | py | Python | ooobuild/lo/beans/x_hierarchical_property_set_info.py | Amourspirit/ooo_uno_tmpl | 64e0c86fd68f24794acc22d63d8d32ae05dd12b8 | [
"Apache-2.0"
] | null | null | null | ooobuild/lo/beans/x_hierarchical_property_set_info.py | Amourspirit/ooo_uno_tmpl | 64e0c86fd68f24794acc22d63d8d32ae05dd12b8 | [
"Apache-2.0"
] | null | null | null | ooobuild/lo/beans/x_hierarchical_property_set_info.py | Amourspirit/ooo_uno_tmpl | 64e0c86fd68f24794acc22d63d8d32ae05dd12b8 | [
"Apache-2.0"
] | null | null | null | # coding: utf-8
#
# Copyright 2022 :Barry-Thomas-Paul: Moss
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http: // www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Interface Class
# this is a auto generated file generated by Cheetah
# Libre Office Version: 7.3
# Namespace: com.sun.star.beans
import typing
from abc import abstractmethod
from ..uno.x_interface import XInterface as XInterface_8f010a43
if typing.TYPE_CHECKING:
from .property import Property as Property_8f4e0a76
class XHierarchicalPropertySetInfo(XInterface_8f010a43):
    """
    specifies a hierarchy of properties.

    The specification only describes the properties, it does not contain any values.

    See Also:
        `API XHierarchicalPropertySetInfo <https://api.libreoffice.org/docs/idl/ref/interfacecom_1_1sun_1_1star_1_1beans_1_1XHierarchicalPropertySetInfo.html>`_
    """
    __ooo_ns__: str = 'com.sun.star.beans'
    __ooo_full_ns__: str = 'com.sun.star.beans.XHierarchicalPropertySetInfo'
    __ooo_type_name__: str = 'interface'
    __pyunointerface__: str = 'com.sun.star.beans.XHierarchicalPropertySetInfo'

    @abstractmethod
    def getPropertyByHierarchicalName(self, aHierarchicalName: str) -> 'Property_8f4e0a76':
        """
        Return the property identified by the given hierarchical name.

        Raises:
            com.sun.star.beans.UnknownPropertyException: ``UnknownPropertyException``
        """
    @abstractmethod
    def hasPropertyByHierarchicalName(self, aHierarchicalName: str) -> bool:
        """
        Return whether a property with the given hierarchical name exists.
        """
| 35.6 | 160 | 0.744637 |
acf0a2a7371ff620ad4f19ffa1a1aced7d192410 | 129 | py | Python | pyquote/__init__.py | souravaich/pyquote | ac48640d238f4e8d5c20b206180144b82d47cb20 | [
"MIT"
] | 1 | 2020-12-31T19:54:00.000Z | 2020-12-31T19:54:00.000Z | pyquote/__init__.py | souravaich/pyquote | ac48640d238f4e8d5c20b206180144b82d47cb20 | [
"MIT"
] | null | null | null | pyquote/__init__.py | souravaich/pyquote | ac48640d238f4e8d5c20b206180144b82d47cb20 | [
"MIT"
] | null | null | null | """Top-level package for pyquote."""
__author__ = """Sourav Aich"""
__email__ = 'souravaich.ds@gmail.com'
__version__ = '0.0.1'
| 21.5 | 37 | 0.682171 |
acf0a3200656e4180f24718f081c9feac44fb475 | 412 | py | Python | jp.atcoder/abc062/arc074_a/9028883.py | kagemeka/atcoder-submissions | 91d8ad37411ea2ec582b10ba41b1e3cae01d4d6e | [
"MIT"
] | 1 | 2022-02-09T03:06:25.000Z | 2022-02-09T03:06:25.000Z | jp.atcoder/abc062/arc074_a/9028883.py | kagemeka/atcoder-submissions | 91d8ad37411ea2ec582b10ba41b1e3cae01d4d6e | [
"MIT"
] | 1 | 2022-02-05T22:53:18.000Z | 2022-02-09T01:29:30.000Z | jp.atcoder/abc062/arc074_a/9028883.py | kagemeka/atcoder-submissions | 91d8ad37411ea2ec582b10ba41b1e3cae01d4d6e | [
"MIT"
] | null | null | null | import sys
H, W = map(int, sys.stdin.readline().split())
def minimize(H, W):
h = round(H / 3)
a = h * W
w = W // 2
b = (H - h) * w
c = (H - h) * (W - w)
return max(a, b, c) - min(a, b, c)
def main():
if H % 3 == 0 or W % 3 == 0:
return 0
return min(minimize(H, W), minimize(W, H))
if __name__ == "__main__":
ans = main()
print(ans)
| 16.48 | 47 | 0.432039 |
acf0a33e37465d5afb16db658dfbaeb2038886fe | 6,714 | py | Python | draw_card/update_game_simple_info.py | yuan488/nonebot_plugin_gamedraw | 6d6fda5e8051026027b9eec2550e12184dc53674 | [
"MIT"
] | 1 | 2021-06-21T16:53:00.000Z | 2021-06-21T16:53:00.000Z | draw_card/update_game_simple_info.py | yuan488/nonebot_plugin_gamedraw | 6d6fda5e8051026027b9eec2550e12184dc53674 | [
"MIT"
] | null | null | null | draw_card/update_game_simple_info.py | yuan488/nonebot_plugin_gamedraw | 6d6fda5e8051026027b9eec2550e12184dc53674 | [
"MIT"
] | null | null | null | import aiohttp
from .config import DRAW_PATH, SEMAPHORE
from asyncio.exceptions import TimeoutError
from bs4 import BeautifulSoup
from .util import download_img
from .util import remove_prohibited_str
from urllib.parse import unquote
import bs4
import asyncio
try:
import ujson as json
except ModuleNotFoundError:
import json
headers = {'User-Agent': '"Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; TencentTraveler 4.0)"'}
async def update_simple_info(url: str, game_name: str) -> 'dict, int':
    """Scrape character data for *game_name* from *url* and merge it into
    the local ``<game_name>.json`` cache.

    Returns:
        (data, status): the merged character dict and a status code --
        200 on success, 999 (with an empty dict) on timeout.
    """
    # Load the existing cache if present; start fresh on a missing or
    # corrupt file.
    try:
        with open(DRAW_PATH + f'{game_name}.json', 'r', encoding='utf8') as f:
            data = json.load(f)
    except (ValueError, FileNotFoundError):
        data = {}
    try:
        async with aiohttp.ClientSession(headers=headers) as session:
            async with session.get(url, timeout=7) as response:
                soup = BeautifulSoup(await response.text(), 'lxml')
                divs = get_char_divs(soup, game_name)
                for div in divs:
                    type_lst = get_type_lst(div, game_name)
                    # index tracks the position of the rarity/type tab and
                    # is interpreted per-game by retrieve_char_data.
                    index = 0
                    for char_lst in type_lst:
                        contents = get_char_lst_contents(char_lst, game_name)
                        for char in contents:
                            data = await retrieve_char_data(char, game_name, data, index)
                        index += 1
                # Post-processing pass (extra assets / per-character info).
                data = await _last_check(data, game_name, session)
    except TimeoutError:
        print(f'更新 {game_name} 超时...')
        return {}, 999
    # Persist the merged result back to the cache file.
    with open(DRAW_PATH + f'{game_name}.json', 'w', encoding='utf8') as wf:
        wf.write(json.dumps(data, ensure_ascii=False, indent=4))
    return data, 200
# Locate the <div> elements that contain the character images for a game.
def get_char_divs(soup: bs4.BeautifulSoup, game_name: str) -> bs4.element.ResultSet:
    """Return all character-container divs for *game_name* (None if unknown)."""
    css_class = {'pcr': 'tabbertab', 'azur': 'resp-tabs'}.get(game_name)
    if css_class is not None:
        return soup.find_all('div', {'class': css_class})
# Collect the per-type content blocks inside a game tab <div>.
def get_type_lst(div: bs4.element.Tag, game_name: str):
    """Return the list of per-type tab-content divs (None for unknown games)."""
    if game_name not in ('pcr', 'azur'):
        return None
    container = div.find('div', {'class': 'resp-tabs-container'})
    return container.find_all('div', {'class': 'resp-tab-content'})
# Extract the individual character nodes from a type-content block.
def get_char_lst_contents(char_lst: bs4.element.Tag, game_name: str):
    """Return the character child nodes of *char_lst*, dropping newline text nodes."""
    # print(len(char_lst.find_all('tr')))
    if game_name == 'pcr':
        nodes = char_lst.contents
    elif game_name == 'azur':
        nodes = (char_lst.find('table').find('tbody')
                 .contents[-1].find('td').contents)
    else:
        nodes = []
    return [node for node in nodes if node != '\n']
# Post-processing: download rarity-frame images and fill in extra info.
async def _last_check(data: dict, game_name: str, session: aiohttp.ClientSession) -> dict:
    """For 'azur': fetch the four rarity-frame images and, concurrently,
    each character's wiki page to record its acquisition method.

    Other games are returned unchanged.
    """
    if game_name == 'azur':
        # Rarity frame images, saved as '1_star' .. '4_star'.
        idx = 1
        for url in [
            'https://patchwiki.biligame.com/images/blhx/thumb/1/15/pxho13xsnkyb546tftvh49etzdh74cf.png/60px'
            '-舰娘头像外框普通.png',
            'https://patchwiki.biligame.com/images/blhx/thumb/a/a9/k8t7nx6c8pan5vyr8z21txp45jxeo66.png/60px'
            '-舰娘头像外框稀有.png',
            'https://patchwiki.biligame.com/images/blhx/thumb/a/a5/5whkzvt200zwhhx0h0iz9qo1kldnidj.png/60px'
            '-舰娘头像外框精锐.png',
            'https://patchwiki.biligame.com/images/blhx/thumb/a/a2/ptog1j220x5q02hytpwc8al7f229qk9.png/60px-'
            '舰娘头像外框超稀有.png'
        ]:
            await download_img(url, 'azur', f'{idx}_star')
            idx += 1
        tasks = []
        # Cap concurrent wiki requests with a semaphore.
        semaphore = asyncio.Semaphore(SEMAPHORE)
        for key in data.keys():
            tasks.append(asyncio.ensure_future(_async_update_azur_extra_info(key, session, semaphore)))
        asyResult = await asyncio.gather(*tasks)
        # Merge each fetched '获取途径' (acquisition method) field back in.
        for x in asyResult:
            for key in x.keys():
                data[key]['获取途径'] = x[key]['获取途径']
    return data
# Tab-position index (as a string) -> azur ship-type label used by
# retrieve_char_data when tagging azur characters.
azur_type = {
    '0': '驱逐',
    '1': '轻巡',
    '2': '重巡',
    '3': '超巡',
    '4': '战巡',
    '5': '战列',
    '6': '航母',
    '7': '航站',
    '8': '轻航',
    '9': '重炮',
    '10': '维修',
    '11': '潜艇',
    '12': '运输',
}
# Normalize one character node into the cache dict and fetch its avatar.
async def retrieve_char_data(char: bs4.element.Tag, game_name: str, data: dict, index: int = 0) -> dict:
    """Parse a single character element, download its avatar image, and
    store the entry in *data* keyed by the character name.

    Args:
        char: the character's HTML node.
        game_name: 'pcr' or 'azur'; controls how the node is parsed.
        data: the accumulated character dict (mutated and returned).
        index: tab position; for pcr this encodes star rating (3 - index),
               for azur it selects the ship type via azur_type.
    """
    member_dict = {}
    if game_name == 'pcr':
        member_dict = {
            '头像': unquote(char.find('img', {'class': 'img-kk'})['src']),
            '名称': remove_prohibited_str(char.find('a')['title']),
            '星级': 3 - index}
    if game_name == 'azur':
        char = char.find('td').find('div')
        avatar_img = char.find('a').find('img')
        # Prefer the higher-resolution srcset URL; fall back to src.
        try:
            member_dict['头像'] = unquote(str(avatar_img['srcset']).split(' ')[-2])
        except KeyError:
            member_dict['头像'] = unquote(str(avatar_img['src']).split(' ')[-2])
        # Name is the alt text with the trailing '头像' (avatar) suffix cut off.
        member_dict['名称'] = remove_prohibited_str(str(avatar_img['alt'])[: str(avatar_img['alt']).find('头像')])
        # Map the frame-image filename to a numeric star rating.
        star = char.find('div').find('img')['alt']
        if star == '舰娘头像外框普通.png':
            star = 1
        elif star == '舰娘头像外框稀有.png':
            star = 2
        elif star == '舰娘头像外框精锐.png':
            star = 3
        elif star == '舰娘头像外框超稀有.png':
            star = 4
        elif star == '舰娘头像外框海上传奇.png':
            star = 5
        elif star in ['舰娘头像外框最高方案.png', '舰娘头像外框决战方案.png', '舰娘头像外框超稀有META.png']:
            star = 6
        member_dict['星级'] = star
        member_dict['类型'] = azur_type[str(index)]
    await download_img(member_dict['头像'], game_name, member_dict['名称'])
    data[member_dict['名称']] = member_dict
    print(f'{member_dict["名称"]} is update...')
    return data
async def _async_update_azur_extra_info(key: str, session: aiohttp.ClientSession, semaphore):
    """Fetch the wiki page for ship *key* and classify how it is obtained.

    Returns {key: {'获取途径': [...]}} on success, or {} if all retries
    time out.  Retrofit ships (name ending in '改') are short-circuited
    as not constructible.
    """
    if key[-1] == '改':
        return {key: {'获取途径': ['无法建造']}}
    async with semaphore:
        # Retry up to 20 times on timeout.
        for i in range(20):
            try:
                async with session.get(f'https://wiki.biligame.com/blhx/{key}', timeout=7) as res:
                    soup = BeautifulSoup(await res.text(), 'lxml')
                    construction_time = str(soup.find('table', {'class': 'wikitable sv-general'}).find('tbody'))
                    x = {key: {'获取途径': []}}
                    # Classify by marker text in the construction table:
                    # 无法建造 = not constructible, 活动已关闭 = event-limited,
                    # otherwise constructible.
                    if construction_time.find('无法建造') != -1:
                        x[key]['获取途径'].append('无法建造')
                    elif construction_time.find('活动已关闭') != -1:
                        x[key]['获取途径'].append('活动限定')
                    else:
                        x[key]['获取途径'].append('可以建造')
                    print(f'碧蓝航线获取额外信息 {key}...{x[key]["获取途径"]}')
                    return x
            except TimeoutError:
                print(f'访问 https://wiki.biligame.com/blhx/{key} 第 {i}次 超时...已再次访问')
    return {}
| 38.365714 | 113 | 0.550492 |
acf0a34b73b65a62d4b0108a4809da75cb426d68 | 681 | py | Python | reservations/tests/common.py | kkarolis/cct-subsilocus | b2fd38a184550203410aa29248a6e5d6160acc19 | [
"MIT"
] | 1 | 2020-03-04T18:23:30.000Z | 2020-03-04T18:23:30.000Z | reservations/tests/common.py | kkarolis/cct-subsilocus | b2fd38a184550203410aa29248a6e5d6160acc19 | [
"MIT"
] | 5 | 2021-03-18T23:45:40.000Z | 2021-06-10T18:27:09.000Z | reservations/tests/common.py | kkarolis/cct-subsilocus | b2fd38a184550203410aa29248a6e5d6160acc19 | [
"MIT"
] | null | null | null | import contextlib
from django.contrib.auth.models import User
from rest_framework.test import APITestCase
class APITestCommonCase(APITestCase):
fixtures = ["demo_data.json"]
def setUp(self):
super().setUp()
self.user = User.objects.get(pk=1)
@contextlib.contextmanager
def get_authenticated_client(self, user=None):
client = self.client
try:
client.force_authenticate(user=user if user is not None else self.user)
yield client
finally:
client.logout()
def assertStatusCode(self, status_code, response):
self.assertEqual(status_code, response.status_code, response.data)
| 26.192308 | 83 | 0.679883 |
acf0a36a747e8bbf09674d7ac9c556c2418c6a57 | 6,202 | py | Python | tests/libs/python/stupendous_cow/importer/generic_ss/configuration_tests.py | tomault/stupendous-cow | 95955a52fa1aa70ded564ffeb7780ada66ab3741 | [
"Apache-2.0"
] | null | null | null | tests/libs/python/stupendous_cow/importer/generic_ss/configuration_tests.py | tomault/stupendous-cow | 95955a52fa1aa70ded564ffeb7780ada66ab3741 | [
"Apache-2.0"
] | null | null | null | tests/libs/python/stupendous_cow/importer/generic_ss/configuration_tests.py | tomault/stupendous-cow | 95955a52fa1aa70ded564ffeb7780ada66ab3741 | [
"Apache-2.0"
] | null | null | null | from stupendous_cow.importer.generic_ss.configuration import *
from stupendous_cow.importer.spreadsheets import SpreadsheetPath
import yaml
import cStringIO
import unittest
class ConfigurationParserTests(unittest.TestCase):
def setUp(self):
self.parser = ConfigurationFileParser()
def test_parse_config(self):
dg_1 = self._create_document_group()
dg_2 = self._create_document_group(\
title = 'extracted', abstract = 'extracted',
content_dirs = [ '/home/cows/moo', '/home/peguins/wark' ],
priority = 4,
downloaded_as = SpreadsheetPath('NewPapers', 'DownloadedAs'),
article_type = 'Long Paper', category = 'Deep Learning',
summary_title = 'extracted',
summary_text = SpreadsheetPath('NewSummaries', 'SUMMARY_TITLE'),
is_read = 'False',
abstracts_file_reader = 'nips',
abstracts_file_name = 'nips2018_part2.txt',
article_extractor = 'special_pdf')
true_config = self._create_config_map(document_groups = [ dg_1, dg_2 ])
config = self.parser.load(stream = self._create_stream(true_config))
self._verify_config(true_config, config)
def _create_config_map(self, venue = 'NIPS', year = 2018,
document_groups = ()):
config_map = { 'Venue' : venue, 'Year' : year }
for (n, dg) in enumerate(document_groups):
config_map['DocumentGroup_%d' % (n + 1)] = dg
return config_map
def _create_document_group(self, title = SpreadsheetPath('Papers', 'TITLE'),
abstract = 'file',
content_dirs = [ '/home/tomault/conferences' ],
priority = SpreadsheetPath('Papers', 'PRIORITY'),
downloaded_as = SpreadsheetPath('Papers',
'DOWNLOADED_AS'),
article_type = SpreadsheetPath('Papers', 'TYPE'),
category = SpreadsheetPath('Papers', 'AREA'),
summary_title = SpreadsheetPath('Summaries',
'TITLE'),
summary_text = SpreadsheetPath('Summaries',
'SUMMARY'),
is_read = SpreadsheetPath('Papers', 'IS_READ'),
abstracts_file_reader = 'nips',
abstracts_file_name = 'nips2018_details.txt',
article_extractor = 'default_pdf'):
return { 'Title' : title, 'Abstract' : abstract,
'ContentDir' : content_dirs, 'Priority' : priority,
'DownloadedAs' : downloaded_as, 'ArticleType' : article_type,
'Category' : category, 'SummaryTitle' : summary_title,
'SummaryText' : summary_text, 'IsRead' : is_read,
'AbstractsFileReader' : abstracts_file_reader,
'AbstractsFileName' : abstracts_file_name,
'Extractor' : article_extractor }
def _create_stream(self, config):
def s(value):
if isinstance(value, SpreadsheetPath):
return '@%s[%s]' % (value.sheet, value.column)
return value
tmp = { 'Venue' : config['Venue'], 'Year' : config['Year'] }
for dg_name in (k for k in config if k.startswith('DocumentGroup_')):
dg = config[dg_name]
abstract_reader = '%s("%s")' % (dg['AbstractsFileReader'],
dg['AbstractsFileName'])
serialized = { 'Title' : s(dg['Title']),
'Abstract' : s(dg['Abstract']),
'ContentDir' : dg['ContentDir'],
'Priority' : s(dg['Priority']),
'DownloadedAs' : s(dg['DownloadedAs']),
'ArticleType' : s(dg['ArticleType']),
'Category' : s(dg['Category']),
'SummaryTitle' : s(dg['SummaryTitle']),
'SummaryText' : s(dg['SummaryText']),
'IsRead' : s(dg['IsRead']),
'AbstractsFileReader' : abstract_reader,
'Extractor' : dg['Extractor'] }
tmp[dg_name] = serialized
return cStringIO.StringIO(yaml.dump(tmp, default_flow_style = False))
def _verify_config(self, true_config, config):
self.assertEqual(true_config['Venue'], config.venue)
self.assertEqual(true_config['Year'], config.year)
true_dgs = sorted([ x for x in true_config \
if x.startswith('DocumentGroup_') ])
for (true_dg_name, dg) in zip(true_dgs, config.document_groups):
true_dg = true_config[true_dg_name]
self._verify_document_group(true_dg_name, true_dg, dg)
def _verify_document_group(self, true_name, true_dg, dg):
self.assertEqual(true_name, dg.config_name)
self.assertEqual(true_dg['Title'], dg.title_source)
self.assertEqual(true_dg['Abstract'], dg.abstract_source)
self.assertEqual(true_dg['ContentDir'], dg.content_dirs)
self.assertEqual(true_dg['Priority'], dg.priority_source)
self.assertEqual(true_dg['DownloadedAs'], dg.downloaded_as_source)
self.assertEqual(true_dg['ArticleType'], dg.article_type_source)
self.assertEqual(true_dg['Category'], dg.category_source)
self.assertEqual(true_dg['SummaryTitle'], dg.summary_title_source)
self.assertEqual(true_dg['SummaryText'], dg.summary_content_source)
self.assertEqual(true_dg['IsRead'], dg.is_read_source)
self.assertEqual(true_dg['AbstractsFileReader'],
dg.abstracts_file_reader)
self.assertEqual(true_dg['AbstractsFileName'], dg.abstracts_file_name)
self.assertEqual(true_dg['Extractor'], dg.article_extractor)
if __name__ == '__main__':
unittest.main()
| 53.465517 | 80 | 0.556756 |
acf0a393df416dfacfe99e1e1c4463a467653c8e | 676 | py | Python | Demo/sockets/echosvr.py | AtjonTV/Python-1.4 | 2a80562c5a163490f444181cb75ca1b3089759ec | [
"Unlicense",
"TCL",
"DOC",
"AAL",
"X11"
] | null | null | null | Demo/sockets/echosvr.py | AtjonTV/Python-1.4 | 2a80562c5a163490f444181cb75ca1b3089759ec | [
"Unlicense",
"TCL",
"DOC",
"AAL",
"X11"
] | null | null | null | Demo/sockets/echosvr.py | AtjonTV/Python-1.4 | 2a80562c5a163490f444181cb75ca1b3089759ec | [
"Unlicense",
"TCL",
"DOC",
"AAL",
"X11"
] | null | null | null | #! /usr/local/bin/python
# Python implementation of an 'echo' tcp server: echo all data it receives.
#
# This is the simplest possible server, servicing a single request only.
import sys
from socket import *
# The standard echo port isn't very useful, it requires root permissions!
# ECHO_PORT = 7
ECHO_PORT = 50000 + 7
BUFSIZE = 1024
def main():
	# Serve exactly one TCP connection and echo everything received.
	if len(sys.argv) > 1:
		# NOTE(review): eval() on a command-line argument executes
		# arbitrary code -- int(sys.argv[1]) would be safer.
		port = int(eval(sys.argv[1]))
	else:
		port = ECHO_PORT
	s = socket(AF_INET, SOCK_STREAM)
	# NOTE(review): two-argument bind() is an obsolete form; modern
	# Python requires a single (host, port) tuple.
	s.bind('', port)
	s.listen(1)
	conn, (remotehost, remoteport) = s.accept()
	print 'connected by', remotehost, remoteport
	while 1:
		data = conn.recv(BUFSIZE)
		if not data:
			break  # peer closed the connection
		conn.send(data)  # echo back verbatim
| 21.125 | 75 | 0.698225 |
acf0a42c3a7ed2934cd0d2598bedda4b6da53813 | 11,292 | py | Python | fpiweb/tests/test_forms.py | Lissabella/Food-Pantry-Inventory | bee895defa204a687b50af4697fd5be33b098b0a | [
"MIT"
] | 22 | 2019-02-19T23:31:08.000Z | 2022-01-18T11:35:45.000Z | fpiweb/tests/test_forms.py | Lissabella/Food-Pantry-Inventory | bee895defa204a687b50af4697fd5be33b098b0a | [
"MIT"
] | 149 | 2019-02-20T21:20:22.000Z | 2022-03-11T23:49:08.000Z | fpiweb/tests/test_forms.py | deeppunster/Food-Pantry-Inventory | 3f264e1109776059bb84a82eda808a513b3c7907 | [
"MIT"
] | 28 | 2019-02-19T21:33:24.000Z | 2021-05-28T22:44:39.000Z |
__author__ = '(Multiple)'
__project__ = "Food-Pantry-Inventory"
__creation_date__ = "06/03/2019"
from django.db.models import Count
from django.test import TestCase
from fpiweb.forms import \
BoxItemForm, \
BuildPalletForm, \
ConfirmMergeForm, \
ExistingLocationForm, \
ExistingLocationWithBoxesForm, \
LocationForm, \
MoveToLocationForm, \
NewBoxForm
from fpiweb.models import \
Box, \
BoxNumber, \
BoxType, \
Location, \
LocBin, \
LocRow, \
LocTier, \
Product
class NewBoxFormTest(TestCase):
    """
    Test creating a new box number not previously in inventory.
    """

    # Reference data required by the form under test.
    fixtures = ('BoxType', 'Constraints')

    def test_save(self):
        """
        Test saving a new box number.
        :return:
        """
        box_type = BoxType.objects.get(box_type_code='Evans')

        post_data = {
            'box_number': '27',
            'box_type': box_type.pk,
        }

        form = NewBoxForm(post_data)
        self.assertTrue(
            form.is_valid(),
            f"{form.errors} {form.non_field_errors()}",
        )

        form.save(commit=True)
        box = form.instance
        self.assertIsNotNone(box)
        self.assertIsNotNone(box.pk)
        # The saved box should default its quantity from the box type.
        self.assertEqual(box_type.box_type_qty, box.quantity)
class BuildPalletFormTest(TestCase):
    """
    Test the form for building a pallet of boxes.
    """

    def test_is_valid__location_not_specified(self):
        # An empty form must be invalid: the pallet location is required.
        form = BuildPalletForm()
        self.assertFalse(form.is_valid())
class BoxItemFormTest(TestCase):
    """
    Test adding boxes to the pallet form.
    """

    # Reference data required by the form under test.
    fixtures = ('BoxType', 'Product', 'ProductCategory', 'Constraints')

    def test_box_number_validation(self):
        # A malformed box number must be rejected with a field error.
        box_number = 'blerg123'
        post_data = {
            'box_number': box_number,
            'product': Product.objects.first().pk,
            'exp_year': 2022,
        }
        form = BoxItemForm(post_data)
        self.assertFalse(form.is_valid())
        self.assertIn(
            f'{box_number} is not a valid Box Number',
            form.errors.get('box_number'),
        )

    def test_expire_months(self):
        """
        Ensure that start month <= end month
        """
        post_data = {
            'box_number': BoxNumber.format_box_number(12),
            'product': Product.objects.first().pk,
            'exp_year': 2022,
            'exp_month_start': 5,
            'exp_month_end': 3,
        }
        form = BoxItemForm(post_data)
        self.assertFalse(form.is_valid())
        # The month-ordering violation surfaces as a non-field error.
        self.assertIn(
            'Exp month end must be later than or equal to Exp month start',
            form.non_field_errors(),
        )
class LocationFormTest(TestCase):
    """Validation tests for LocationForm field requirements and choices."""

    # Row/bin/tier reference data for the location dropdowns.
    fixtures = ('LocRow', 'LocBin', 'LocTier')

    def test_is_valid__missing_value(self):
        # Omitting loc_bin and giving an out-of-range loc_tier should
        # produce one error per field, and no others.
        row = LocRow.objects.get(pk=1)
        form = LocationForm({
            'loc_row': row.id,
            'loc_tier': 99,
        })
        self.assertFalse(form.is_valid())
        self.assertEqual(
            {'loc_bin', 'loc_tier'},
            form.errors.keys(),
        )
        self.assertEqual(
            ['This field is required.'],
            form.errors['loc_bin'],
        )
        self.assertEqual(
            ['Select a valid choice. That choice is not one of the available choices.'],
            form.errors['loc_tier'],
        )
class ExistingLocationFormTest(TestCase):
fixtures = ('LocRow', 'LocBin', 'LocTier', 'Location')
def test_clean__nonexistent_location(self):
loc_row = LocRow.objects.get(loc_row='04')
loc_bin = LocBin.objects.get(loc_bin='03')
loc_tier = LocTier.objects.get(loc_tier='B2')
# ----------------------
# Non-existent location
# ----------------------
location = Location.get_location(loc_row, loc_bin, loc_tier)
location.delete()
form = ExistingLocationForm({
'loc_row': loc_row.pk,
'loc_bin': loc_bin.pk,
'loc_tier': loc_tier.pk,
})
self.assertFalse(form.is_valid())
self.assertEqual(
{
'__all__': ['Location 04, 03, B2 does not exist.']
},
form.errors,
)
self.assertEqual(
['Location 04, 03, B2 does not exist.'],
form.non_field_errors(),
)
def test_clean__multiple_locations_found(self):
# -------------------------
# Multiple locations found
# -------------------------
location = Location.get_location('04', '04', 'B1')
# Create a duplicate location
Location.objects.create(
loc_row=location.loc_row,
loc_bin=location.loc_bin,
loc_tier=location.loc_tier
)
form = ExistingLocationForm({
'loc_row': location.loc_row.pk,
'loc_bin': location.loc_bin.pk,
'loc_tier': location.loc_tier.pk,
})
self.assertFalse(form.is_valid())
self.assertEqual(
{
'__all__': ['Multiple 04, 04, B1 locations found'],
},
form.errors,
)
self.assertEqual(
['Multiple 04, 04, B1 locations found'],
form.non_field_errors(),
)
def test_clean__successful_run(self):
location = Location.get_location('04', '02', 'B1')
form = ExistingLocationForm({
'loc_row': location.loc_row.pk,
'loc_bin': location.loc_bin.pk,
'loc_tier': location.loc_tier.pk,
})
self.assertTrue(form.is_valid())
self.assertEqual(location.pk, form.cleaned_data['location'].pk)
class ExistingLocationWithBoxesFormTest(TestCase):
fixtures = ('BoxType', 'LocRow', 'LocBin', 'LocTier', 'Location')
def test_clean(self):
# ----------------------------------------------------------------
# super class's clean detects error (i.e. location doesn't exist)
# ----------------------------------------------------------------
loc_row = '03'
loc_bin = '03'
loc_tier = 'A1'
location = Location.get_location(loc_row, loc_bin, loc_tier)
location.delete()
form = ExistingLocationWithBoxesForm({
'loc_row': location.loc_row,
'loc_bin': location.loc_bin,
'loc_tier': location.loc_tier
})
self.assertFalse(form.is_valid())
self.assertEqual(
{'__all__': ['Location 03, 03, A1 does not exist.']},
form.errors,
)
self.assertEqual(
['Location 03, 03, A1 does not exist.'],
form.non_field_errors(),
)
# ---------------------------
# Try a location w/out boxes
# ---------------------------
location = Location.objects.annotate(
box_count=Count('box')
).filter(
box_count=0
).first()
form = ExistingLocationWithBoxesForm({
'loc_row': location.loc_row,
'loc_bin': location.loc_bin,
'loc_tier': location.loc_tier,
})
self.assertFalse(form.is_valid())
expected_error = "Location {}, {}, {} doesn't have any boxes".format(
location.loc_row.loc_row,
location.loc_bin.loc_bin,
location.loc_tier.loc_tier,
)
self.assertEqual(
{'__all__': [expected_error]},
form.errors,
)
self.assertEqual(
[expected_error],
form.non_field_errors(),
)
# ---------------------------------------------
# Add a box to the location form will validate
# ---------------------------------------------
Box.objects.create(
box_type=Box.box_type_default(),
box_number=BoxNumber.format_box_number(111),
location=location,
)
form = ExistingLocationWithBoxesForm({
'loc_row': location.loc_row,
'loc_bin': location.loc_bin,
'loc_tier': location.loc_tier,
})
self.assertTrue(form.is_valid())
self.assertEqual(
location,
form.cleaned_data['location'],
)
class MoveToLocationFormTest(TestCase):
fixtures = (
'Location',
'LocBin',
'LocRow',
'LocTier',
)
def test_is_valid(self):
"""
MoveToLocationForm adds the from_location field to the
ExistingLocationForm so we only look at how from_location
effects validation.
:return: None
"""
to_location = Location.get_location('01', '01', 'A2')
form = MoveToLocationForm({
'loc_row': to_location.loc_row,
'loc_bin': to_location.loc_bin,
'loc_tier': to_location.loc_tier,
})
self.assertFalse(form.is_valid())
self.assertEqual(
{'from_location': ['This field is required.']},
form.errors,
)
self.assertEqual(
[],
form.non_field_errors(),
)
from_location = Location.get_location('01', '01', 'A1')
form = MoveToLocationForm({
'loc_row': to_location.loc_row,
'loc_bin': to_location.loc_bin,
'loc_tier': to_location.loc_tier,
'from_location': from_location,
})
self.assertTrue(form.is_valid())
class ConfirmMergeFormTest(TestCase):
    """Exercises ConfirmMergeForm validation and its helper accessors."""

    # Location fixtures needed so get_location() can resolve row/bin/tier.
    fixtures = ('Location', 'LocBin', 'LocRow', 'LocTier')

    def test_is_valid(self):
        """An empty form reports its three required fields; a fully
        populated form validates with no errors of either kind."""
        form = ConfirmMergeForm({})
        self.assertFalse(form.is_valid())
        self.assertEqual(
            {
                'from_location': ['This field is required.'],
                'to_location': ['This field is required.'],
                'action': ['This field is required.'],
            },
            form.errors
        )
        self.assertEqual(
            [],
            form.non_field_errors(),
        )
        from_location = Location.get_location('01', '01', 'C1')
        to_location = Location.get_location('02', '02', 'C1')
        form = ConfirmMergeForm({
            'from_location': from_location,
            'to_location': to_location,
            'action': ConfirmMergeForm.ACTION_MERGE_PALLETS,
        })
        # Pass the errors dict as the assertion message for easier debugging.
        self.assertTrue(form.is_valid(), dict(form.errors))
        self.assertEqual({}, form.errors)
        self.assertEqual([], form.non_field_errors())

    def test_boxes_at_location_int(self):
        """boxes_at_to_location_int() echoes the count given via initial."""
        boxes_at_to_location = 5
        form = ConfirmMergeForm(
            initial={'boxes_at_to_location': boxes_at_to_location}
        )
        self.assertEqual(
            boxes_at_to_location,
            form.boxes_at_to_location_int()
        )

    def test_to_location_str(self):
        """to_location_str() returns a sentinel message when initial lacks
        to_location, otherwise formats the location as 'row, bin, tier'."""
        form = ConfirmMergeForm({})
        self.assertEqual(
            'to_location not found in initial',
            form.to_location_str(),
        )
        to_location = Location.get_location('02', '02', 'C1')
        form = ConfirmMergeForm(
            initial={
                'to_location': to_location,
            }
        )
        self.assertEqual('02, 02, C1', form.to_location_str())
| 27.275362 | 88 | 0.535423 |
acf0a478c151a63a07446a279b919aa3c1b22622 | 3,129 | py | Python | aiida/orm/utils/log.py | azadoks/aiida-core | b806b7fef8fc79090deccfe2019b77cb922e0581 | [
"MIT",
"BSD-3-Clause"
] | 180 | 2019-07-12T07:45:26.000Z | 2022-03-22T13:16:57.000Z | aiida/orm/utils/log.py | azadoks/aiida-core | b806b7fef8fc79090deccfe2019b77cb922e0581 | [
"MIT",
"BSD-3-Clause"
] | 2,325 | 2019-07-04T13:41:44.000Z | 2022-03-31T12:17:10.000Z | aiida/orm/utils/log.py | azadoks/aiida-core | b806b7fef8fc79090deccfe2019b77cb922e0581 | [
"MIT",
"BSD-3-Clause"
] | 88 | 2019-07-06T01:42:39.000Z | 2022-03-18T14:20:09.000Z | # -*- coding: utf-8 -*-
###########################################################################
# Copyright (c), The AiiDA team. All rights reserved. #
# This file is part of the AiiDA code. #
# #
# The code is hosted on GitHub at https://github.com/aiidateam/aiida-core #
# For further information on the license, see the LICENSE.txt file #
# For further information please visit http://www.aiida.net #
###########################################################################
"""Module for logging methods/classes that need the ORM."""
import logging
class DBLogHandler(logging.Handler):
    """A custom db log handler for writing logs to the database"""

    def emit(self, record):
        """Persist ``record`` as an ORM Log entry for the backend it carries."""
        if record.exc_info:
            # We do this because if there is exc_info this will put an appropriate string in exc_text.
            # See:
            # https://github.com/python/cpython/blob/1c2cb516e49ceb56f76e90645e67e8df4e5df01a/Lib/logging/handlers.py#L590
            self.format(record)

        # Imported lazily so that merely importing this module does not
        # require a configured Django environment.
        from django.core.exceptions import ImproperlyConfigured  # pylint: disable=no-name-in-module, import-error
        from aiida import orm

        try:
            try:
                # 'backend' is injected into the record's extra dict by
                # get_dblogger_extra(); pop it so it is not stored as a
                # regular record attribute.
                backend = record.__dict__.pop('backend')
                orm.Log.objects(backend).create_entry_from_record(record)
            except KeyError:
                # The backend should be set. We silently absorb this error
                pass
        except ImproperlyConfigured:
            # Probably, the logger was called without the
            # Django settings module loaded. Then,
            # This ignore should be a no-op.
            pass
        except Exception:  # pylint: disable=broad-except
            # To avoid loops with the error handler, I just print.
            # Hopefully, though, this should not happen!
            import traceback
            traceback.print_exc()
            raise
def get_dblogger_extra(node):
    """Build the ``extra`` dict that ties log records to a stored node.

    :param node: a Node instance
    :return: a dict carrying ``dbnode_id`` and ``backend`` for a stored
        Node, otherwise an empty dict (a record without a ``dbnode_id``
        cannot be stored, so no extra information is attached)
    """
    from aiida.orm import Node

    if isinstance(node, Node) and node.is_stored:
        return {'dbnode_id': node.id, 'backend': node.backend}

    # Not a Node, or not yet stored: nothing to attach.
    return {}
def create_logger_adapter(logger, node):
    """Wrap ``logger`` in a ``LoggerAdapter`` that tags records with ``node``.

    :param logger: the logger to adapt
    :param node: the node instance to create the adapter for
    :return: the logger adapter
    :rtype: :class:`logging.LoggerAdapter`
    :raises TypeError: if ``node`` is not a ``Node`` instance
    """
    from aiida.orm import Node

    if not isinstance(node, Node):
        raise TypeError('node should be an instance of `Node`')

    extra = get_dblogger_extra(node)
    return logging.LoggerAdapter(logger=logger, extra=extra)
| 40.115385 | 122 | 0.601151 |
acf0a51d97994a64c3cbab67ec340d14899a38b4 | 1,044 | py | Python | stubs/ev3_pybricks_1_0_0/usocket.py | RonaldHiemstra/micropython-stubs | d97f879b01f6687baaebef1c7e26a80909c3cff3 | [
"MIT"
] | 38 | 2020-10-18T21:59:44.000Z | 2022-03-17T03:03:28.000Z | stubs/ev3_pybricks_1_0_0/usocket.py | RonaldHiemstra/micropython-stubs | d97f879b01f6687baaebef1c7e26a80909c3cff3 | [
"MIT"
] | 176 | 2020-10-18T14:31:03.000Z | 2022-03-30T23:22:39.000Z | stubs/ev3_pybricks_1_0_0/usocket.py | RonaldHiemstra/micropython-stubs | d97f879b01f6687baaebef1c7e26a80909c3cff3 | [
"MIT"
] | 6 | 2020-12-28T21:11:12.000Z | 2022-02-06T04:07:50.000Z | """
Module: 'usocket' on LEGO EV3 v1.0.0
"""
# MCU: sysname=ev3, nodename=ev3, release=('v1.0.0',), version=('0.0.0',), machine=ev3
# Stubber: 1.3.2
# Address families
AF_INET = 2  # IPv4
AF_INET6 = 10  # IPv6
AF_UNIX = 1  # local (Unix domain) sockets
# Message flags for send/recv
MSG_DONTROUTE = 4
MSG_DONTWAIT = 64
# Socket types
SOCK_DGRAM = 2  # datagram (UDP)
SOCK_RAW = 3  # raw access
SOCK_STREAM = 1  # stream (TCP)
# Option level and option names for setsockopt/getsockopt
SOL_SOCKET = 1
SO_BROADCAST = 6
SO_ERROR = 4
SO_KEEPALIVE = 9
SO_LINGER = 13
SO_REUSEADDR = 2
# NOTE: auto-generated firmware stubs (see the "Stubber" header above);
# bodies are intentionally empty and the signatures omit all parameters.
def getaddrinfo():
    pass

def inet_ntop():
    pass

def inet_pton():
    pass

def sockaddr():
    pass
class socket:
    """Stub of the MicroPython ``usocket.socket`` class.

    NOTE(review): this is a generated autocomplete stub -- every method body
    is empty and the signatures omit ``self`` and all parameters, so these
    are not callable as real instance methods.
    """
    def accept():
        pass
    def bind():
        pass
    def close():
        pass
    def connect():
        pass
    def fileno():
        pass
    def listen():
        pass
    def makefile():
        pass
    def read():
        pass
    def readinto():
        pass
    def readline():
        pass
    def recv():
        pass
    def recvfrom():
        pass
    def send():
        pass
    def sendto():
        pass
    def setblocking():
        pass
    def setsockopt():
        pass
    def write():
        pass
| 12.139535 | 86 | 0.528736 |
acf0a550aead2a08b21c76b39a11899935b4574b | 1,664 | py | Python | tutorials/01-basics/linear_regression/main.py | XiaoSanGit/pytorch-tutorial | e12207b270bbd64234e0f535a1a4d7be990d2187 | [
"MIT"
] | null | null | null | tutorials/01-basics/linear_regression/main.py | XiaoSanGit/pytorch-tutorial | e12207b270bbd64234e0f535a1a4d7be990d2187 | [
"MIT"
] | null | null | null | tutorials/01-basics/linear_regression/main.py | XiaoSanGit/pytorch-tutorial | e12207b270bbd64234e0f535a1a4d7be990d2187 | [
"MIT"
] | null | null | null | import torch
import torch.nn as nn
import numpy as np
import matplotlib.pyplot as plt
# Hyper-parameters
input_size = 1
output_size = 1
num_epochs = 60
learning_rate = 0.001

# Toy dataset: 15 scalar (x, y) pairs, shaped (15, 1) for nn.Linear.
x_train = np.array([[3.3], [4.4], [5.5], [6.71], [6.93], [4.168],
                    [9.779], [6.182], [7.59], [2.167], [7.042],
                    [10.791], [5.313], [7.997], [3.1]], dtype=np.float32)

y_train = np.array([[1.7], [2.76], [2.09], [3.19], [1.694], [1.573],
                    [3.366], [2.596], [2.53], [1.221], [2.827],
                    [3.465], [1.65], [2.904], [1.3]], dtype=np.float32)

# Linear regression model: a single affine layer y = Wx + b.
model = nn.Linear(input_size, output_size)

# Loss and optimizer
criterion = nn.MSELoss()
optimizer = torch.optim.SGD(model.parameters(), lr=learning_rate)

# Convert the numpy arrays to torch tensors ONCE, outside the loop.
# The data never changes between epochs, so rebuilding the tensors on
# every iteration (as before) was loop-invariant work.
inputs = torch.from_numpy(x_train)
targets = torch.from_numpy(y_train)

# Train the model
for epoch in range(num_epochs):
    # Forward pass
    outputs = model(inputs)
    loss = criterion(outputs, targets)

    # Backward and optimize
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()

    # Report progress every 5 epochs.
    if (epoch+1) % 5 == 0:
        print('Epoch [{}/{}], Loss: {:.4f}'.format(epoch+1, num_epochs, loss.item()))

# Plot the graph (detach() drops the autograd graph before .numpy()).
predicted = model(torch.from_numpy(x_train)).detach().numpy()
plt.plot(x_train, y_train, 'ro', label='Original data')
plt.plot(x_train, predicted, label='Fitted line')
plt.legend()
plt.show()

# Save the model checkpoint
torch.save(model.state_dict(), 'model.ckpt') | 29.714286 | 116 | 0.622596 |
acf0a559904b3eddb4a7e80bc5e234467a8c4d6c | 1,118 | py | Python | app/views/dashboard/items/image_category_activation.py | Wern-rm/raton.by | 68f862f2bc0551bf2327e9d6352c0cde93f45301 | [
"MIT"
] | null | null | null | app/views/dashboard/items/image_category_activation.py | Wern-rm/raton.by | 68f862f2bc0551bf2327e9d6352c0cde93f45301 | [
"MIT"
] | null | null | null | app/views/dashboard/items/image_category_activation.py | Wern-rm/raton.by | 68f862f2bc0551bf2327e9d6352c0cde93f45301 | [
"MIT"
] | null | null | null | from flask import redirect, url_for
from flask_login import login_required
from app import db, logger
from app.models.item_images import ItemImages
from app.models.items import Items
from app.models.items_category import ItemsCategory
from app.views.dashboard import bp
from app.controllers.dashboard_controller import dashboard_controller
@bp.route('/item/image/catalog/activated/<int:item_id>/<int:image_id>', methods=['GET'])
@login_required
@dashboard_controller
def item_category_image_activated(item_id: int, image_id: int):
    """Promote the selected item image to be its category's cover image.

    Looks up the image, walks image -> item -> category, copies the image
    URL onto the category, and redirects back to the item page with a
    success flag.  Any failure rolls the transaction back, logs the error,
    and redirects with a warning flag instead.
    """
    try:
        image = db.session.query(ItemImages).filter(ItemImages.id == image_id).first()
        item = db.session.query(Items).filter(Items.id == image.item_id).first()
        category_rows = db.session.query(ItemsCategory).filter(ItemsCategory.id == item.category_id)
        category_rows.update({'image': image.url})
        db.session.commit()
        return redirect(url_for('dashboard.item', item_id=item_id, action='success', id=18))
    except Exception as exc:
        # Roll back the partial update, record the failure, and signal it
        # to the user via the 'warning' action.
        db.session.rollback()
        logger.error(exc)
        return redirect(url_for('dashboard.item', item_id=item_id, action='warning', id=1))
acf0a5e1799b7c57dfd82861c9ccc1f132c34375 | 8,342 | py | Python | tensorflow/contrib/keras/api/keras/layers/__init__.py | M155K4R4/Tensorflow | e5e03ef3148303b3dfed89a1492dedf92b45be25 | [
"Apache-2.0"
] | 522 | 2016-06-08T02:15:50.000Z | 2022-03-02T05:30:36.000Z | tensorflow/contrib/keras/api/keras/layers/__init__.py | M155K4R4/Tensorflow | e5e03ef3148303b3dfed89a1492dedf92b45be25 | [
"Apache-2.0"
] | 48 | 2016-07-26T00:11:55.000Z | 2022-02-23T13:36:33.000Z | tensorflow/contrib/keras/api/keras/layers/__init__.py | M155K4R4/Tensorflow | e5e03ef3148303b3dfed89a1492dedf92b45be25 | [
"Apache-2.0"
] | 108 | 2016-06-16T15:34:05.000Z | 2022-03-12T13:23:11.000Z | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Keras layers API."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# Generic layers.
# pylint: disable=g-bad-import-order
from tensorflow.python.keras._impl.keras.engine import Input
from tensorflow.python.keras._impl.keras.engine import InputLayer
from tensorflow.python.keras._impl.keras.engine import InputSpec
from tensorflow.python.keras._impl.keras.engine import Layer
# Advanced activations.
from tensorflow.python.keras._impl.keras.layers.advanced_activations import LeakyReLU
from tensorflow.python.keras._impl.keras.layers.advanced_activations import PReLU
from tensorflow.python.keras._impl.keras.layers.advanced_activations import ELU
from tensorflow.python.keras._impl.keras.layers.advanced_activations import ThresholdedReLU
# Convolution layers.
from tensorflow.python.keras._impl.keras.layers.convolutional import Conv1D
from tensorflow.python.keras._impl.keras.layers.convolutional import Conv2D
from tensorflow.python.keras._impl.keras.layers.convolutional import Conv3D
from tensorflow.python.keras._impl.keras.layers.convolutional import Conv2DTranspose
from tensorflow.python.keras._impl.keras.layers.convolutional import Conv3DTranspose
from tensorflow.python.keras._impl.keras.layers.convolutional import SeparableConv2D
# Convolution layer aliases.
from tensorflow.python.keras._impl.keras.layers.convolutional import Convolution1D
from tensorflow.python.keras._impl.keras.layers.convolutional import Convolution2D
from tensorflow.python.keras._impl.keras.layers.convolutional import Convolution3D
from tensorflow.python.keras._impl.keras.layers.convolutional import Convolution2DTranspose
from tensorflow.python.keras._impl.keras.layers.convolutional import Convolution3DTranspose
from tensorflow.python.keras._impl.keras.layers.convolutional import SeparableConvolution2D
# Image processing layers.
from tensorflow.python.keras._impl.keras.layers.convolutional import UpSampling1D
from tensorflow.python.keras._impl.keras.layers.convolutional import UpSampling2D
from tensorflow.python.keras._impl.keras.layers.convolutional import UpSampling3D
from tensorflow.python.keras._impl.keras.layers.convolutional import ZeroPadding1D
from tensorflow.python.keras._impl.keras.layers.convolutional import ZeroPadding2D
from tensorflow.python.keras._impl.keras.layers.convolutional import ZeroPadding3D
from tensorflow.python.keras._impl.keras.layers.convolutional import Cropping1D
from tensorflow.python.keras._impl.keras.layers.convolutional import Cropping2D
from tensorflow.python.keras._impl.keras.layers.convolutional import Cropping3D
# Convolutional-recurrent layers.
from tensorflow.python.keras._impl.keras.layers.convolutional_recurrent import ConvLSTM2D
# Core layers.
from tensorflow.python.keras._impl.keras.layers.core import Masking
from tensorflow.python.keras._impl.keras.layers.core import Dropout
from tensorflow.python.keras._impl.keras.layers.core import SpatialDropout1D
from tensorflow.python.keras._impl.keras.layers.core import SpatialDropout2D
from tensorflow.python.keras._impl.keras.layers.core import SpatialDropout3D
from tensorflow.python.keras._impl.keras.layers.core import Activation
from tensorflow.python.keras._impl.keras.layers.core import Reshape
from tensorflow.python.keras._impl.keras.layers.core import Permute
from tensorflow.python.keras._impl.keras.layers.core import Flatten
from tensorflow.python.keras._impl.keras.layers.core import RepeatVector
from tensorflow.python.keras._impl.keras.layers.core import Lambda
from tensorflow.python.keras._impl.keras.layers.core import Dense
from tensorflow.python.keras._impl.keras.layers.core import ActivityRegularization
# Embedding layers.
from tensorflow.python.keras._impl.keras.layers.embeddings import Embedding
# Locally-connected layers.
from tensorflow.python.keras._impl.keras.layers.local import LocallyConnected1D
from tensorflow.python.keras._impl.keras.layers.local import LocallyConnected2D
# Merge layers.
from tensorflow.python.keras._impl.keras.layers.merge import Add
from tensorflow.python.keras._impl.keras.layers.merge import Multiply
from tensorflow.python.keras._impl.keras.layers.merge import Average
from tensorflow.python.keras._impl.keras.layers.merge import Maximum
from tensorflow.python.keras._impl.keras.layers.merge import Concatenate
from tensorflow.python.keras._impl.keras.layers.merge import Dot
from tensorflow.python.keras._impl.keras.layers.merge import add
from tensorflow.python.keras._impl.keras.layers.merge import multiply
from tensorflow.python.keras._impl.keras.layers.merge import average
from tensorflow.python.keras._impl.keras.layers.merge import maximum
from tensorflow.python.keras._impl.keras.layers.merge import concatenate
from tensorflow.python.keras._impl.keras.layers.merge import dot
# Noise layers.
from tensorflow.python.keras._impl.keras.layers.noise import AlphaDropout
from tensorflow.python.keras._impl.keras.layers.noise import GaussianNoise
from tensorflow.python.keras._impl.keras.layers.noise import GaussianDropout
# Normalization layers.
from tensorflow.python.keras._impl.keras.layers.normalization import BatchNormalization
# Pooling layers.
from tensorflow.python.keras._impl.keras.layers.pooling import MaxPooling1D
from tensorflow.python.keras._impl.keras.layers.pooling import MaxPooling2D
from tensorflow.python.keras._impl.keras.layers.pooling import MaxPooling3D
from tensorflow.python.keras._impl.keras.layers.pooling import AveragePooling1D
from tensorflow.python.keras._impl.keras.layers.pooling import AveragePooling2D
from tensorflow.python.keras._impl.keras.layers.pooling import AveragePooling3D
from tensorflow.python.keras._impl.keras.layers.pooling import GlobalAveragePooling1D
from tensorflow.python.keras._impl.keras.layers.pooling import GlobalAveragePooling2D
from tensorflow.python.keras._impl.keras.layers.pooling import GlobalAveragePooling3D
from tensorflow.python.keras._impl.keras.layers.pooling import GlobalMaxPooling1D
from tensorflow.python.keras._impl.keras.layers.pooling import GlobalMaxPooling2D
from tensorflow.python.keras._impl.keras.layers.pooling import GlobalMaxPooling3D
# Pooling layer aliases.
from tensorflow.python.keras._impl.keras.layers.pooling import MaxPool1D
from tensorflow.python.keras._impl.keras.layers.pooling import MaxPool2D
from tensorflow.python.keras._impl.keras.layers.pooling import MaxPool3D
from tensorflow.python.keras._impl.keras.layers.pooling import AvgPool1D
from tensorflow.python.keras._impl.keras.layers.pooling import AvgPool2D
from tensorflow.python.keras._impl.keras.layers.pooling import AvgPool3D
from tensorflow.python.keras._impl.keras.layers.pooling import GlobalAvgPool1D
from tensorflow.python.keras._impl.keras.layers.pooling import GlobalAvgPool2D
from tensorflow.python.keras._impl.keras.layers.pooling import GlobalAvgPool3D
from tensorflow.python.keras._impl.keras.layers.pooling import GlobalMaxPool1D
from tensorflow.python.keras._impl.keras.layers.pooling import GlobalMaxPool2D
from tensorflow.python.keras._impl.keras.layers.pooling import GlobalMaxPool3D
# Recurrent layers.
from tensorflow.python.keras._impl.keras.layers.recurrent import SimpleRNN
from tensorflow.python.keras._impl.keras.layers.recurrent import GRU
from tensorflow.python.keras._impl.keras.layers.recurrent import LSTM
# Wrapper functions
from tensorflow.python.keras._impl.keras.layers.wrappers import Wrapper
from tensorflow.python.keras._impl.keras.layers.wrappers import Bidirectional
from tensorflow.python.keras._impl.keras.layers.wrappers import TimeDistributed
del absolute_import
del division
del print_function
| 55.986577 | 91 | 0.846799 |
acf0a61ce734e0bcd9734b6268854e9a3a49e9fd | 930 | py | Python | alphametics.py | liguangyulgy/mytest1 | 08133d04881ec94df49093aba94baa31e30ffb9b | [
"BSD-2-Clause"
] | null | null | null | alphametics.py | liguangyulgy/mytest1 | 08133d04881ec94df49093aba94baa31e30ffb9b | [
"BSD-2-Clause"
] | null | null | null | alphametics.py | liguangyulgy/mytest1 | 08133d04881ec94df49093aba94baa31e30ffb9b | [
"BSD-2-Clause"
] | null | null | null | import re
import itertools
def solve(puzzle):
    """Brute-force an alphametic puzzle such as "SEND + MORE == MONEY".

    Tries every assignment of distinct digits to the puzzle's letters
    (leading letters never map to zero) and returns the first digit
    substitution that makes the equation true, or None if none does.
    """
    words = re.findall('[A-Z]+', puzzle.upper())
    letters = set(''.join(words))
    assert len(letters) <= 10, 'TOO MANY'
    leading = {word[0] for word in words}
    leading_count = len(leading)
    # Put the leading letters first so a single prefix check rules out
    # assignments that would produce a number with a leading zero.
    ordering = ''.join(leading) + ''.join(letters - leading)
    char_codes = tuple(ord(ch) for ch in ordering)
    digit_codes = tuple(ord(ch) for ch in '0123456789')
    zero_code = digit_codes[0]
    for assignment in itertools.permutations(digit_codes, len(char_codes)):
        if zero_code in assignment[:leading_count]:
            continue
        candidate = puzzle.translate(dict(zip(char_codes, assignment)))
        if eval(candidate):
            return candidate
    return None
if __name__ == '__main__':
    # Demo: solve a classic alphametic when run as a script.
    # (The previously unused `import sys` has been removed.)
    for puzzle in ("HAWAII + IDAHO + IOWA + OHIO == STATES",):
        print(puzzle)
        solution = solve(puzzle)
        if solution:
            print(solution)
| 32.068966 | 91 | 0.629032 |
acf0a7383cf3bb40d48e79d36110f90294bb06e9 | 384 | py | Python | hello.py | pridator-coder/Project0 | 6d4311fc2564f5aed8d77d849f8fc6206d8a8168 | [
"MIT"
] | null | null | null | hello.py | pridator-coder/Project0 | 6d4311fc2564f5aed8d77d849f8fc6206d8a8168 | [
"MIT"
] | null | null | null | hello.py | pridator-coder/Project0 | 6d4311fc2564f5aed8d77d849f8fc6206d8a8168 | [
"MIT"
] | null | null | null | # This program says hello world and asks for my name.
print('Hello, world!')
print('what is your name?') # ask for the name
my_Name = input()
print('It is nice to meet you, '+ my_Name)
print('The length of your name is:')
print(len(my_Name))
print(len(' '))  # NOTE(review): prints 1 (length of a single space) -- looks like a leftover experiment
print('What is your age?') # ask for the age
my_Age = input()
# int() raises ValueError if the user types something non-numeric.
print('You will be ' + str(int(my_Age) + 1) + ' in a year')
| 32 | 59 | 0.658854 |
acf0a80eb7bc62ef7db8f617939da061bc746a31 | 15,298 | py | Python | oblib/data_model.py | garretfick/core | 457dfc6d30b12f90bf899594d76f8b3496d264dd | [
"Apache-2.0"
] | null | null | null | oblib/data_model.py | garretfick/core | 457dfc6d30b12f90bf899594d76f8b3496d264dd | [
"Apache-2.0"
] | null | null | null | oblib/data_model.py | garretfick/core | 457dfc6d30b12f90bf899594d76f8b3496d264dd | [
"Apache-2.0"
] | null | null | null | # Copyright 2018 Jonathan Xia
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
class Hypercube(object):
    """
    (Currently a placeholder) a data structure to represent a table
    (aka a Hypercube) within a document.
    """
    def __init__(self, table_name, axis_names):
        # table_name: qualified name of the hypercube, e.g. "solar:...Table"
        # axis_names: list of qualified axis (dimension) names for the table
        self._table_name = table_name
        self._axis_names = axis_names
        self._line_items = []

    def name(self):
        """Return the table's qualified name."""
        return self._table_name

    def axes(self):
        """Return the list of axis names belonging to this table."""
        return self._axis_names

    def addLineItem(self, line_item_name):
        """Register a LineItems concept as belonging to this table.

        BUG FIX: this used to *assign* the name to self._line_items,
        clobbering the list on every call (and turning hasLineItem into a
        substring test on a string).  It now appends, without duplicates.
        """
        if line_item_name not in self._line_items:
            self._line_items.append(line_item_name)

    def hasLineItem(self, line_item_name):
        """Return True if the named LineItems concept was added to this table."""
        return line_item_name in self._line_items
class Context(object):
    """Time-period, entity, and axis values that qualify a fact.

    Exactly one of ``instant`` or ``duration`` must be supplied; any other
    keyword ending in "Axis" is collected into ``self.axes`` under its
    "solar:"-qualified name.
    """
    def __init__(self, **kwargs):
        # kwargs must provide exactly one of instant or duration
        if "instant" in kwargs and "duration" in kwargs:
            raise Exception("Context given both instant and duration")
        if (not "instant" in kwargs) and (not "duration" in kwargs):
            raise Exception("Context not given either instant or duration")
        # BUG FIX: initialize all three attributes so they always exist.
        # Previously the unsupplied one was never set, so later attribute
        # access (e.g. Entrypoint.sufficientContext reading
        # context.duration on an instant-only context) raised
        # AttributeError instead of behaving as "not provided".
        self.instant = kwargs.pop("instant", None)
        self.duration = kwargs.pop("duration", None)
        self.entity = kwargs.pop("entity", None)
        # anything that's not instant/duration or entity must be an axis
        self.axes = {}
        for keyword in kwargs:
            if not keyword.endswith("Axis"):
                # TODO in the future we should use metadata to identify
                # what's an axis, not just look for the string "Axis".
                raise Exception("Context given invalid keyword {}".format(keyword))
            qualified_name = "solar:" + keyword
            self.axes[qualified_name] = kwargs[keyword]
class Concept(object):
    """A node in the parent/child concept tree built for an entrypoint."""

    def __init__(self, concept_name):
        self.name = concept_name
        self.parent = None
        self.children = []

    def setParent(self, new_parent):
        """Attach this concept beneath new_parent, keeping both links in sync."""
        self.parent = new_parent
        if self not in new_parent.children:
            new_parent.children.append(self)

    def addChild(self, new_child):
        """Attach new_child beneath this concept, keeping both links in sync."""
        if new_child not in self.children:
            self.children.append(new_child)
            new_child.parent = self

    def getAncestors(self):
        """Return this concept's parent, grandparent, etc. (nearest first)."""
        lineage = []
        node = self.parent
        while node is not None:
            lineage.append(node)
            node = node.parent
        return lineage
class Entrypoint(object):
    """
    A data structure representing an orange button document
    from a particular entrypoint -- for example, an MOR.
    This class's representation of the data is format-agnostic, it just
    provides methods for getting/setting and validation; translation to
    and from particular physical file format (or database schema) will
    be handled elsewhere.
    """
    def __init__(self, entrypoint_name, taxonomy):
        """
        Initializes an empty instance of a document corresponding to the named
        entrypoint. entrypoint_name is a string that must match an entry point in
        the taxonomy. Looks up the list of concepts for this entry point from
        the taxonomy to know what concepts are allowed in the document.
        Looks up the relationships between concepts for this entry point from
        the taxonomy to know the hierarchical relationship of concepts, tables,
        and axes/dimensions.
        taxonomy_semantic should be the global singleton TaxonomySemantic
        object.
        """
        self.ts = taxonomy.semantic
        self.entrypoint_name = entrypoint_name
        if not self.ts.validate_ep(entrypoint_name):
            raise Exception("There is no Orange Button entrypoint named {}."\
                .format(entrypoint_name))

        # This gives me the list of every concept that could ever be
        # included in the document.
        self._all_allowed_concepts = self.ts.concepts_ep(entrypoint_name)
        # ts.concepts_info_ep(entrypoint_name)  # fails on
        # u'solar:MeterRatingAccuracy:1'

        # Get the relationships (this comes from solar_taxonomy/documents/
        # <entrypoint>/<entrypoint><version>_def.xml)
        self.relations = self.ts.relationships_ep(entrypoint_name)
        # Search through the relationships to find all of the tables, their
        # axes, and parent/child relationships between concepts:
        self._find_tables()
        self._find_parents()

        # Facts set so far, keyed by concept name only (see TODO in set()).
        self.facts = {}

    def allowedConcepts(self):
        """Return the full list of concept names valid for this entrypoint."""
        return self._all_allowed_concepts

    def _find_tables(self):
        """
        Uses relations to find all of the tables (hypercubes) allowed in
        the document, and the axes and lineitems for each one.
        """
        # When there's an arcrole of "hypercube-dimensions", the "from"
        # is a hypercube/table, and the "to" is an axis.
        self._tables = {}
        axes = {}
        for relation in self.relations:
            if relation['role'] == 'hypercube-dimension':
                table_name = relation['from']
                axis_name = relation['to']
                if not table_name in axes:
                    axes[table_name] = []
                axes[table_name].append(axis_name)
        # One Hypercube instance per table, holding its collected axes.
        for table_name in axes:
            self._tables[table_name] = Hypercube(table_name, axes[table_name])

        # If there's an arcrole of "all" then the "from" is a LineItems
        # and the "to" is the table? I think?
        for relation in self.relations:
            if relation['role'] == 'all':
                line_item = relation['from']
                table_name = relation['to']
                table = self._tables[table_name]
                table.addLineItem(line_item)

    def _find_parents(self):
        """Build self.all_my_concepts, a name -> Concept tree."""
        # Put the concepts into a tree based on domain-member
        # relations.
        all_my_concepts = {}
        for relation in self.relations:
            if relation['role'] == 'domain-member':
                parent = relation['from']
                child = relation['to']
                if not parent in all_my_concepts:
                    all_my_concepts[parent] = Concept(parent)
                if not child in all_my_concepts:
                    all_my_concepts[child] = Concept(child)
                all_my_concepts[parent].addChild(all_my_concepts[child])
        self.all_my_concepts = all_my_concepts

    def getTableNames(self):
        """Return the names of all tables in this entrypoint (a dict view)."""
        return self._tables.keys()

    def getTable(self, table_name):
        """Return the Hypercube named table_name (KeyError if absent)."""
        return self._tables[table_name]

    def _identify_relations(self, concept_name):
        # Development method for listing all relationships for debugging
        # purposes. Do not use in production.
        from_me = [r for r in self.relations if r['from'] == concept_name]
        for x in from_me:
            print("{} -> {} -> {}".format(concept_name, x['role'], x['to']))
        to_me = [r for r in self.relations if r['to'] == concept_name]
        for x in to_me:
            print("{} -> {} -> {}".format(x['from'], x['role'], concept_name))

    def getTableForConcept(self, concept_name):
        """
        Given a concept_name, returns the table (Hypercube object) which
        that concept belongs inside of, or None if there's no match.
        """
        if not concept_name in self._all_allowed_concepts:
            raise Exception("{} is not an allowed concept for {}".format(
                concept_name, self.entrypoint_name))
        # We know that a concept belongs in a table because the concept
        # is a descendant of a LineItem that has a relationship to the
        # table.
        if not concept_name in self.all_my_concepts:
            return None
        ancestors = self.all_my_concepts[concept_name].getAncestors()
        # Walk up the tree until we reach a "...LineItem..." ancestor, then
        # find which table claims that LineItems concept.
        for ancestor in ancestors:
            if "LineItem" in ancestor.name:
                for table in self._tables.values():
                    if table.hasLineItem(ancestor.name):
                        return table
        return None

    def canWriteConcept(self, concept_name):
        """
        Returns True if concept_name is a writeable concept within this
        document. False for concepts not in this document or concepts that
        are only abstract parents of writeable concepts. e.g. you can't
        write a value to an "Abstract" or a "LineItem".
        """
        if concept_name in self.all_my_concepts:
            # Name-suffix heuristic: these endings mark structural/abstract
            # concepts that never hold a fact value themselves.
            abstract_keywords = ["Abstract", "LineItems", "Table", "Domain", "Axis"]
            for word in abstract_keywords:
                if concept_name.endswith(word):
                    return False
            return True
        return False

    def sufficientContext(self, concept_name, context):
        """
        True if the given Context object contains all the information
        needed to provide full context for the named concept -- sufficient
        time period information (duration/instant), sufficient axes to place
        the fact within its table, etc.
        Otherwise, raises an exception explaining what is wrong.
        """
        # Refactor to put this logic into the Concept?
        # make the Context into an object instead of a dictionary?
        # do Context arguments as **kwargs ?
        # NOTE(review): this method requires context.duration / context.instant
        # attributes to exist on the Context object it is given.
        metadata = self.ts.concept_info(concept_name)
        if metadata.period_type == "duration":
            if not context.duration:
                raise Exception("Missing required duration in {} context".format(
                    concept_name))

            # a valid duration is either "forever" or {"start", "end"}
            valid = False
            if context.duration == "forever":
                valid = True
            if "start" in context.duration and "end" in context.duration:
                # TODO check isinstance(duration["start"], datetime)
                valid = True
            if not valid:
                raise Exception("Invalid duration in {} context".format(
                    concept_name))

        if metadata.period_type == "instant":
            if not context.instant:
                raise Exception("Missing required instant in {} context".format(
                    concept_name))
            # TODO check isinstance(instant, datetime)

        # If we got this far, we know the time period is OK. Now check the
        # required axes, if this concept is on a table:
        table = self.getTableForConcept(concept_name)
        if table is not None:
            for axis in table.axes():
                if not axis in context.axes:
                    raise Exception("Missing required {} axis for {}".format(
                        axis, concept_name))
                # Check that the value given of context.axes[axis] is valid!
                # (How do we do that?)
            # TODO check that we haven't given any EXTRA axes that the table
            # DOESN'T want?
        return True

    def set(self, concept, value, **kwargs):
        """
        (Placeholder) Adds a fact to the document. The concept and the context
        together identify the fact to set, and the value will be stored for
        that fact.
        If concept and context are identical to a previous call, the old fact
        will be overwritten. Otherwise, a new fact is created.
        acceptable keyword args:
        unit = <unit name>
        precision = <number of places past the decimal pt>(for decimal values only)
        context = a Context object
        can be supplied as separate keyword args instead of context object:
        duration = "forever" or {"start": <date>, "end": <date>}
        instant = <date>
        entity = <entity name>
        *Axis = <value> (the name of any Axis in a table in this entrypoint)
        """
        # NOTE(review): unit and precision are popped out of kwargs but not
        # yet stored anywhere -- placeholder for future validation/storage.
        if "unit" in kwargs:
            unit = kwargs.pop("unit")
        if "precision" in kwargs:
            precision = kwargs.pop("precision")

        if not concept in self._all_allowed_concepts:
            raise Exception("{} is not allowed in the {} entrypoint".format(
                concept, self.entrypoint_name))
        if not self.canWriteConcept(concept):
            raise Exception("{} is not a writeable concept".format(concept))

        if "context" in kwargs:
            context = kwargs.pop("context")
        elif len(kwargs.keys()) > 0:
            # turn the remaining keyword args into a Context object -- this
            # is just syntactic sugar to make this method easier to call.
            context = Context(**kwargs)
        else:
            context = None
        # TODO:
        # In this case, use the default context if one has been set.

        if not self.sufficientContext(concept, context):
            raise Exception("Insufficient context given for {}".format(concept))

        concept_ancestors = self.all_my_concepts[concept].getAncestors()
        concept_metadata = self.ts.concept_info(concept)
        table = self.getTableForConcept(concept)

        # complain if value is invalid for concept
        # complain if context is needed and not present
        # complain if context has wrong unit
        # complain if context has wrong duration/instant type
        # add to facts
        # figure out the data structure for facts that can be keyed on
        # context as well as concept. Probably need to copy over the logic
        # from py-xbrl-generator that turns context values into a context ID.
        self.facts[concept] = value

        # concept_metadata properties:
        #x.period_type
        #x.nillable
        #x.id
        #x.name
        #x.substitution_group
        #x.type_name
        #x.period_independent
        # Which will be useful for validation.

    def get(self, concept, context=None):
        """
        (Placeholder) Returns the value of a fact previously set. The concept
        and context together identify the fact to read.
        """
        # look up the facts we have
        # (context not needed if we only have one fact for this concept)
        # complain if no value for concept
        # complain if context needed and not provided
        #
        return self.facts[concept]

    def isValid(self):
        """
        (Placeholder) Returns true if all of the facts in the document validate.
        i.e. they have allowed data types, allowed units, anything that needs
        to be in a table has all the required axis values to identify its place
        in that table, etc.
        """
        return True

    def isComplete(self):
        """
        (Placeholder) Returns true if no required facts are missing, i.e. if
        there is a value for all concepts with nillable=False
        """
        return True
| 39.838542 | 84 | 0.617597 |
acf0a83768ee0ca47c45b1242db4899166eca1c6 | 543 | py | Python | Ruby/migrations/0006_auto_20211117_1510.py | IHIMEKPEN/RubyStore | 1914dd8138a2aa5b63f5c1f611b02fab14802aa5 | [
"MIT"
] | null | null | null | Ruby/migrations/0006_auto_20211117_1510.py | IHIMEKPEN/RubyStore | 1914dd8138a2aa5b63f5c1f611b02fab14802aa5 | [
"MIT"
] | null | null | null | Ruby/migrations/0006_auto_20211117_1510.py | IHIMEKPEN/RubyStore | 1914dd8138a2aa5b63f5c1f611b02fab14802aa5 | [
"MIT"
] | null | null | null | # Generated by Django 3.2.8 on 2021-11-17 14:10
from django.db import migrations, models
class Migration(migrations.Migration):
    """Replace ``User.cart_list`` with a many-to-many relation to Product.

    Drops the old ``cart_list`` field and re-adds it as a ManyToManyField.
    Fix: the generated field carried ``null=True``, which has no effect on
    ManyToManyField (Django system check fields.W340 warns about it), so it
    is removed here; ``blank=True`` alone keeps the field optional in forms.
    """

    dependencies = [
        ('Ruby', '0005_alter_user_cart_list'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='user',
            name='cart_list',
        ),
        migrations.AddField(
            model_name='user',
            name='cart_list',
            # null=True intentionally omitted: it is meaningless on M2M fields.
            field=models.ManyToManyField(blank=True, related_name='users', to='Ruby.Product'),
        ),
    ]
| 23.608696 | 105 | 0.585635 |
acf0a83d3979eb17ebb2117b5b4f89779e7fba8b | 8,498 | py | Python | thatkitebot/cogs/welcomecog.py | ThatRedKite/thatkitebot | 7e8069e675c3d3d35e4369213b548a3af6d103a0 | [
"MIT"
] | 7 | 2021-11-20T21:23:46.000Z | 2022-03-22T02:19:42.000Z | thatkitebot/cogs/welcomecog.py | ThatRedKite/thatkitebot | 7e8069e675c3d3d35e4369213b548a3af6d103a0 | [
"MIT"
] | 13 | 2021-11-21T10:30:17.000Z | 2022-03-09T23:18:10.000Z | thatkitebot/cogs/welcomecog.py | ThatRedKite/thatkitebot | 7e8069e675c3d3d35e4369213b548a3af6d103a0 | [
"MIT"
] | 9 | 2021-11-21T08:55:39.000Z | 2022-03-07T02:49:20.000Z | # Copyright (c) 2019-2022 ThatRedKite and contributors
from turtle import right
import discord
from discord.ext import commands
import time
import re
from datetime import datetime
from operator import itemgetter
from typing import Optional
import aioredis
import discord
from discord.ext import commands
async def update_count(redis: aioredis.Redis, message: discord.Message):
    """
    Update the welcome leaderboard for the author of *message*.

    A message counts as a "welcome" when it contains the word "welcome"
    (case-insensitive). The author's hash ``leaderboard:<author>:<guild>``
    gets its ``welcome_count`` incremented (or created at 1) and its
    ``latest_welcome`` timestamp refreshed.

    Raises AssertionError when ``latest_join:<guild>`` does not exist, i.e.
    nobody has joined since the bot started tracking; callers swallow this.
    """
    if "welcome" in message.content.lower():
        # Whether to refresh the author's latest_welcome timestamp below.
        write = True
        guild, channel, author = message.guild.id, message.channel.id, message.author.id
        # Seconds since the epoch for this message's creation time.
        unixtime = time.mktime(message.created_at.timetuple())
        join_key = f"latest_join:{guild}"
        assert await redis.exists(join_key)  # make sure there is a last_joined key
        joined_dict = await redis.hgetall(join_key)
        # Pull the welcome channel, the last join time, and the joiner's id.
        welcome_channel, latest_join, joined_id = itemgetter("join_channel", "latest_join", "user_id")(joined_dict)
        # Redis returns strings; normalize everything to int for comparisons.
        welcome_channel, latest_join, joined_id = int(welcome_channel), int(latest_join), int(joined_id)
        usr_key = f"leaderboard:{author}:{guild}"
        if await redis.exists(usr_key):
            latest_welcome = int(await redis.hget(usr_key, "latest_welcome"))
            # Only count one welcome per join, and never let the new member
            # welcome themselves.
            if latest_welcome <= latest_join and joined_id != author:
                await redis.hincrby(usr_key, "welcome_count", 1)  # increase welcome_count by one; create if not exist
            else:
                return
        else:
            # First welcome ever recorded for this user: only stamp
            # latest_welcome when it happened in the welcome channel.
            # NOTE(review): welcome_count is still set to 1 even for messages
            # outside the welcome channel — confirm this is intended.
            write = (welcome_channel == channel)
            await redis.hset(usr_key, "welcome_count", 1)
        if write:
            await redis.hset(usr_key, "latest_welcome", int(unixtime))
class WelcomeCog(commands.Cog, name="Welcome counter"):
    """
    Cog that tracks how many times each member has welcomed newly joined
    members, backed by two Redis databases (one for welcome counts, one for
    per-guild bot settings).
    """
    def __init__(self, bot):
        self.bot: discord.Client = bot
        # Redis DB holding leaderboard:<user>:<guild> and latest_join:<guild> hashes.
        self.redis_welcomes: aioredis.Redis = bot.redis_welcomes
        # Redis DB holding per-guild feature flags.
        self.settings_redis: aioredis.Redis = bot.redis

    async def cog_check(self, ctx):
        # Commands in this cog only run in guilds where the WELCOME flag is "TRUE".
        return await self.settings_redis.hget(ctx.guild.id, "WELCOME") == "TRUE"

    @commands.Cog.listener()
    async def on_message(self, message):
        """
        Feed every candidate message to update_count(). Only messages in the
        guild's system (welcome) channel that are not commands and not from
        the bot itself are considered.
        """
        if self.bot.command_prefix not in message.content and message.author.id != self.bot.user.id and message.channel.id == message.guild.system_channel.id:
            try:
                await update_count(self.redis_welcomes, message)
            except AssertionError:
                # update_count asserts that latest_join:<guild> exists; if no
                # join has been recorded yet, silently skip counting.
                pass

    @commands.Cog.listener()
    async def on_member_join(self, joinedmember):
        """
        Record the newest join in latest_join:<guild> (timestamp, user id and
        welcome channel id) and greet the member in the system channel.
        """
        welcomechannel = joinedmember.guild.system_channel.id
        lastjoined = joinedmember.joined_at
        # Convert the join datetime to a unix timestamp for Redis storage.
        unixtime = time.mktime(lastjoined.timetuple())
        guild = joinedmember.guild.id
        key = f"latest_join:{guild}"
        datadict = dict(
            latest_join=int(unixtime),
            user_id=int(joinedmember.id),
            join_channel=int(welcomechannel)
        )
        await self.redis_welcomes.hmset(key, datadict)
        await joinedmember.guild.system_channel.send("welcome")

    @commands.cooldown(1, 5, commands.BucketType.user)
    @commands.command(name="welcomes")
    async def welcome(self, ctx, *, user: Optional[discord.Member] = None):
        """
        Without an argument: show the guild's top-10 welcome leaderboard plus
        the most recent join. With a member argument: show that member's
        personal welcome count and how long ago they last welcomed someone.
        """
        current_time = datetime.utcfromtimestamp(int(time.mktime(ctx.message.created_at.timetuple())))
        # Scan all leaderboard keys for this guild in the DB
        # (async comprehension keeps this a one-liner).
        key_list = [key async for key in self.redis_welcomes.scan_iter(match=f"leaderboard:*:{ctx.guild.id}")]
        leaderboard = dict()
        for i in key_list:
            author = re.findall(r":[\d]{5,}:", i)[0][1:-1]  # extract the author id
            # Key by mention string so it can be rendered directly in the embed.
            leaderboard[f"<@{author}>"] = await self.redis_welcomes.hgetall(i)
        # Sort descending by welcome_count (stored as a string in Redis).
        sorted_lb = sorted(leaderboard.items(), key=lambda x: int(x[1]['welcome_count']), reverse=True)
        if not user:
            embed = discord.Embed(title="Welcome leaderboard")
            lb_str = ""
            number = 1
            for i in sorted_lb:
                if number <= 10:
                    # Medal emoji for the podium, plain numbering otherwise.
                    match number:
                        case 1:
                            number_str = ":first_place: "
                        case 2:
                            number_str = ":second_place: "
                        case 3:
                            number_str = ":third_place: "
                        case _:
                            number_str = " **" + str(number) + "**. "
                    # Humanize the time since this member's last welcome.
                    delta = (current_time - datetime.utcfromtimestamp(int(i[1]["latest_welcome"])))
                    if delta.days == 1:
                        respStr = "1 day ago**\n"
                    elif delta.days > 0:
                        respStr = str(delta.days) + " days ago**\n"
                    else:
                        respStr = str(delta.seconds // 3600) + " hours ago**\n"
                    lb_str += number_str + str(i[0]) \
                        + " welcomes: **" + str(i[1]["welcome_count"]) + "**, last welcome: **" \
                        + respStr
                    number += 1
                    continue
            last_join_dict = await self.redis_welcomes.hgetall(f"latest_join:{ctx.message.guild.id}")
            embed.add_field(name=":medal: Top 10:", value=lb_str, inline=False)
            if 'user_id' in last_join_dict:
                # Also show how long ago the most recent member joined.
                delta = (current_time - datetime.utcfromtimestamp(int(last_join_dict['latest_join'])))
                if delta.days == 1:
                    respStr = "1 day ago**"
                elif delta.days > 0:
                    respStr = str(delta.days) + " days ago**"
                else:
                    respStr = str(delta.seconds // 3600) + " hours ago**"
                footer = str(str(f"<@{last_join_dict['user_id']}>")
                             + " joined: **"
                             + respStr)
                embed.add_field(name=":partying_face: Latest join:", value=footer, inline=False)
        else:
            embed = discord.Embed(title="Personal welcome count")
            target_user = user.id
            number = 1
            found = False
            # Walk the sorted leaderboard to find the requested member's rank.
            for i in sorted_lb:
                if str(target_user) in i[0]:
                    found = True
                    delta = (current_time - datetime.utcfromtimestamp(int(i[1]["latest_welcome"])))
                    if delta.days == 1:
                        respStr = "1 day ago**\n"
                    elif delta.days > 0:
                        respStr = str(delta.days) + " days ago**\n"
                    else:
                        respStr = str(delta.seconds // 3600) + " hours ago**\n"
                    lb_str = "**" + str(number) + "**. " + str(i[0]) \
                        + " welcomes: **" + str(i[1]["welcome_count"]) + "**, last welcome: **" \
                        + respStr
                    embed.add_field(name=f"{user.display_name}'s welcome count:", value=lb_str, inline=False)
                number += 1
            if not found:
                embed.add_field(name=f"{user.display_name}'s welcome count:", value=f"**∞**. {user.mention} welcomes: **0**, last welcome: **Never**", inline=False)
        await ctx.send(embed=embed)

    @commands.is_owner()
    @commands.command(aliases=["ewlb"])
    async def editwelcomelb(self, ctx: commands.Context, user: discord.Member = None, newval: int = None):
        """
        Owner-only: overwrite a member's welcome count (and stamp their
        latest_welcome with the current time).
        """
        if user != None and newval != None:
            unixtime = time.mktime(ctx.message.created_at.timetuple())
            key = f"leaderboard:{user.id}:{ctx.guild.id}"
            await self.redis_welcomes.hset(key, "welcome_count", newval)
            await self.redis_welcomes.hset(key, "latest_welcome", int(unixtime))
            await ctx.send(f"{user.mention}'s welcome count has been set to {newval}.")
        else:
            await ctx.send(f"Please specify a user and a new value. eg. `{self.bot.command_prefix}ewlb @user 10`")
def setup(bot):
    """Extension entry point: attach the WelcomeCog to *bot*."""
    cog = WelcomeCog(bot)
    bot.add_cog(cog)
acf0a871528321495731aac966d8add3d5ebfe7f | 16,454 | py | Python | SimpleNet/model.py | chhanganivarun/saliency | a9edbd7d89d1e170bfb5056eb48e7a103d489995 | [
"MIT"
] | 29 | 2020-03-15T12:06:58.000Z | 2022-02-01T09:40:48.000Z | SimpleNet/model.py | chhanganivarun/saliency | a9edbd7d89d1e170bfb5056eb48e7a103d489995 | [
"MIT"
] | 16 | 2020-03-18T07:26:36.000Z | 2022-03-12T00:44:07.000Z | SimpleNet/model.py | chhanganivarun/saliency | a9edbd7d89d1e170bfb5056eb48e7a103d489995 | [
"MIT"
] | 13 | 2020-03-15T12:07:00.000Z | 2021-10-30T14:42:59.000Z | import torchvision.models as models
import torch
import torch.nn as nn
import torch.nn.functional as F
import sys
sys.path.append('../PNAS/')
from PNASnet import *
from genotypes import PNASNet
class PNASModel(nn.Module):
    """
    Saliency model with a PNASNet-5-Large encoder and a U-Net-style decoder.

    Intermediate encoder activations (stem outputs and cells 3/7/11) are
    concatenated with progressively upsampled decoder features; the final
    layer emits a single-channel sigmoid saliency map.
    NOTE(review): attribute names below are part of the state_dict and must
    not be renamed without breaking saved checkpoints.
    """
    def __init__(self, num_channels=3, train_enc=False, load_weight=1):
        """
        :param num_channels: number of input image channels (unused here;
            kept for interface parity with the sibling models).
        :param train_enc: whether encoder weights receive gradients.
        :param load_weight: truthy to load pretrained PNASNet weights.
        """
        super(PNASModel, self).__init__()
        self.path = '../PNAS/PNASNet-5_Large.pth'
        # 216 init channels, 1001 classes, 12 cells — the standard PNASNet-5-Large config.
        self.pnas = NetworkImageNet(216, 1001, 12, False, PNASNet)
        if load_weight:
            self.pnas.load_state_dict(torch.load(self.path))

        # Freeze (or unfreeze) the whole encoder.
        for param in self.pnas.parameters():
            param.requires_grad = train_enc

        # Pad right/bottom by one so conv0's output aligns with the decoder.
        self.padding = nn.ConstantPad2d((0,1,0,1),0)
        self.drop_path_prob = 0

        self.linear_upsampling = nn.UpsamplingBilinear2d(scale_factor=2)

        # Decoder: each stage halves the channel budget and doubles resolution;
        # input channel counts reflect the skip-connection concatenations.
        self.deconv_layer0 = nn.Sequential(
            nn.Conv2d(in_channels = 4320, out_channels = 512, kernel_size=3, padding=1, bias = True),
            nn.ReLU(inplace=True),
            self.linear_upsampling
        )
        self.deconv_layer1 = nn.Sequential(
            nn.Conv2d(in_channels = 512+2160, out_channels = 256, kernel_size = 3, padding = 1, bias = True),
            nn.ReLU(inplace=True),
            self.linear_upsampling
        )
        self.deconv_layer2 = nn.Sequential(
            nn.Conv2d(in_channels = 1080+256, out_channels = 270, kernel_size = 3, padding = 1, bias = True),
            nn.ReLU(inplace=True),
            self.linear_upsampling
        )
        self.deconv_layer3 = nn.Sequential(
            nn.Conv2d(in_channels = 540, out_channels = 96, kernel_size = 3, padding = 1, bias = True),
            nn.ReLU(inplace=True),
            self.linear_upsampling
        )
        self.deconv_layer4 = nn.Sequential(
            nn.Conv2d(in_channels = 192, out_channels = 128, kernel_size = 3, padding = 1, bias = True),
            nn.ReLU(inplace=True),
            self.linear_upsampling
        )
        self.deconv_layer5 = nn.Sequential(
            nn.Conv2d(in_channels = 128, out_channels = 128, kernel_size = 3, padding = 1, bias = True),
            nn.ReLU(inplace=True),
            nn.Conv2d(in_channels = 128, out_channels = 1, kernel_size = 3, padding = 1, bias = True),
            nn.Sigmoid()
        )

    def forward(self, images):
        """Return a (batch, H, W) saliency map for a batch of images."""
        batch_size = images.size(0)

        # Encoder: capture skip features at the stems and at cells 3, 7, 11.
        s0 = self.pnas.conv0(images)
        s0 = self.pnas.conv0_bn(s0)
        out1 = self.padding(s0)

        s1 = self.pnas.stem1(s0, s0, self.drop_path_prob)
        out2 = s1

        s0, s1 = s1, self.pnas.stem2(s0, s1, 0)

        for i, cell in enumerate(self.pnas.cells):
            s0, s1 = s1, cell(s0, s1, 0)
            if i==3:
                out3 = s1
            if i==7:
                out4 = s1
            if i==11:
                out5 = s1

        # Decoder with skip concatenations, deepest features first.
        out5 = self.deconv_layer0(out5)

        x = torch.cat((out5,out4), 1)
        x = self.deconv_layer1(x)

        x = torch.cat((x,out3), 1)
        x = self.deconv_layer2(x)

        x = torch.cat((x,out2), 1)
        x = self.deconv_layer3(x)

        x = torch.cat((x,out1), 1)
        x = self.deconv_layer4(x)
        x = self.deconv_layer5(x)
        # Drop the singleton channel dimension.
        x = x.squeeze(1)
        return x
class DenseModel(nn.Module):
    """
    Saliency model with a DenseNet-161 encoder and a U-Net-style decoder.

    The encoder is split into five stages whose outputs feed skip
    connections into the decoder; the asserts in forward() document the
    expected feature shapes for a 256x256 input.
    """
    def __init__(self, num_channels=3, train_enc=False, load_weight=1):
        """
        :param num_channels: number of input image channels (unused here;
            kept for interface parity with the sibling models).
        :param train_enc: whether encoder weights receive gradients.
        :param load_weight: truthy to load ImageNet-pretrained weights.
        """
        super(DenseModel, self).__init__()
        self.dense = models.densenet161(pretrained=bool(load_weight)).features

        for param in self.dense.parameters():
            param.requires_grad = train_enc

        self.linear_upsampling = nn.UpsamplingBilinear2d(scale_factor=2)

        # Stage boundaries: each conv_layer ends just before a downsampling
        # step so the skip features keep their spatial resolution.
        self.conv_layer0 = nn.Sequential(*list(self.dense)[:3])

        self.conv_layer1 = nn.Sequential(
            self.dense.pool0,
            self.dense.denseblock1,
            *list(self.dense.transition1)[:3]
        )

        self.conv_layer2 = nn.Sequential(
            self.dense.transition1[3],
            self.dense.denseblock2,
            *list(self.dense.transition2)[:3]
        )

        self.conv_layer3 = nn.Sequential(
            self.dense.transition2[3],
            self.dense.denseblock3,
            *list(self.dense.transition3)[:3]
        )

        self.conv_layer4 = nn.Sequential(
            self.dense.transition3[3],
            self.dense.denseblock4
        )

        # Decoder; input channel counts reflect the skip concatenations.
        self.deconv_layer0 = nn.Sequential(
            nn.Conv2d(in_channels = 2208, out_channels = 512, kernel_size=3, padding=1, bias = True),
            nn.ReLU(inplace=True),
            self.linear_upsampling
        )
        self.deconv_layer1 = nn.Sequential(
            nn.Conv2d(in_channels = 512+1056, out_channels = 256, kernel_size = 3, padding = 1, bias = True),
            nn.ReLU(inplace=True),
            self.linear_upsampling
        )
        self.deconv_layer2 = nn.Sequential(
            nn.Conv2d(in_channels = 384+256, out_channels = 192, kernel_size = 3, padding = 1, bias = True),
            nn.ReLU(inplace=True),
            self.linear_upsampling
        )
        self.deconv_layer3 = nn.Sequential(
            nn.Conv2d(in_channels = 192+192, out_channels = 96, kernel_size = 3, padding = 1, bias = True),
            nn.ReLU(inplace=True),
            self.linear_upsampling
        )
        self.deconv_layer4 = nn.Sequential(
            nn.Conv2d(in_channels = 96+96, out_channels = 128, kernel_size = 3, padding = 1, bias = True),
            nn.ReLU(inplace=True),
            self.linear_upsampling
        )
        self.deconv_layer5 = nn.Sequential(
            nn.Conv2d(in_channels = 128, out_channels = 128, kernel_size = 3, padding = 1, bias = True),
            nn.ReLU(inplace=True),
            nn.Conv2d(in_channels = 128, out_channels = 1, kernel_size = 3, padding = 1, bias = True),
            nn.Sigmoid()
        )

    def forward(self, images):
        """Return a (batch, 256, 256) saliency map; expects 256x256 input."""
        batch_size = images.size(0)

        out1 = self.conv_layer0(images)
        out2 = self.conv_layer1(out1)
        out3 = self.conv_layer2(out2)
        out4 = self.conv_layer3(out3)
        out5 = self.conv_layer4(out4)

        # Shape contracts for a 256x256 input.
        assert out1.size() == (batch_size, 96, 128, 128)
        assert out2.size() == (batch_size, 192, 64, 64)
        assert out3.size() == (batch_size, 384, 32, 32)
        assert out4.size() == (batch_size, 1056, 16, 16)
        assert out5.size() == (batch_size, 2208, 8, 8)

        # Decoder with skip concatenations, deepest features first.
        out5 = self.deconv_layer0(out5)
        x = torch.cat((out5,out4), 1)
        x = self.deconv_layer1(x)
        x = torch.cat((x,out3), 1)
        x = self.deconv_layer2(x)
        x = torch.cat((x,out2), 1)
        x = self.deconv_layer3(x)
        x = torch.cat((x,out1), 1)
        x = self.deconv_layer4(x)
        x = self.deconv_layer5(x)
        # Drop the singleton channel dimension.
        x = x.squeeze(1)
        return x
class ResNetModel(nn.Module):
    """
    Saliency model with a ResNet-50 encoder and a U-Net-style decoder.

    The asserts in forward() document the expected feature shapes for a
    256x256 input.
    """
    def __init__(self, num_channels=3, train_enc=False, load_weight=1):
        """
        :param num_channels: number of input image channels (stored but not
            otherwise used; kept for interface parity with sibling models).
        :param train_enc: whether encoder weights receive gradients.
        :param load_weight: truthy to load ImageNet-pretrained weights.
        """
        super(ResNetModel, self).__init__()

        self.num_channels = num_channels
        self.resnet = models.resnet50(pretrained=bool(load_weight))

        for param in self.resnet.parameters():
            param.requires_grad = train_enc

        # Encoder stages; each ends at a resolution used as a skip feature.
        self.conv_layer1 = nn.Sequential(
            self.resnet.conv1,
            self.resnet.bn1,
            self.resnet.relu
        )
        self.conv_layer2 = nn.Sequential(
            self.resnet.maxpool,
            self.resnet.layer1
        )
        self.conv_layer3 = self.resnet.layer2
        self.conv_layer4 = self.resnet.layer3
        self.conv_layer5 = self.resnet.layer4

        self.linear_upsampling = nn.UpsamplingBilinear2d(scale_factor=2)

        # Decoder; input channel counts reflect the skip concatenations.
        self.deconv_layer0 = nn.Sequential(
            nn.Conv2d(in_channels=2048, out_channels=1024, kernel_size=3, padding=1, bias=True),
            nn.ReLU(inplace=True),
            self.linear_upsampling
        )
        self.deconv_layer1 = nn.Sequential(
            nn.Conv2d(in_channels = 2048, out_channels = 512, kernel_size = 3, padding = 1, bias = True),
            nn.ReLU(inplace=True),
            self.linear_upsampling
        )
        self.deconv_layer2 = nn.Sequential(
            nn.Conv2d(in_channels = 1024, out_channels = 256, kernel_size = 3, padding = 1, bias = True),
            nn.ReLU(inplace=True),
            self.linear_upsampling
        )
        self.deconv_layer3 = nn.Sequential(
            nn.Conv2d(in_channels = 512, out_channels = 64, kernel_size = 3, padding = 1, bias = True),
            nn.ReLU(inplace=True),
            self.linear_upsampling
        )
        self.deconv_layer4 = nn.Sequential(
            nn.Conv2d(in_channels = 128, out_channels = 64, kernel_size = 3, padding = 1, bias = True),
            nn.ReLU(inplace=True),
            self.linear_upsampling
        )
        self.deconv_layer5 = nn.Sequential(
            nn.Conv2d(in_channels = 64, out_channels = 64, kernel_size = 3, padding = 1, bias = True),
            nn.ReLU(inplace=True),
            nn.Conv2d(in_channels = 64, out_channels = 1, kernel_size = 3, padding = 1, bias = True),
            nn.Sigmoid()
        )

    def forward(self, images):
        """Return a (batch, 256, 256) saliency map; expects 256x256 input."""
        batch_size = images.size(0)

        out1 = self.conv_layer1(images)
        out2 = self.conv_layer2(out1)
        out3 = self.conv_layer3(out2)
        out4 = self.conv_layer4(out3)
        out5 = self.conv_layer5(out4)

        # Decoder with skip concatenations; asserts document shape contracts.
        out5 = self.deconv_layer0(out5)
        assert out5.size() == (batch_size, 1024, 16, 16)

        x = torch.cat((out5,out4), 1)
        assert x.size() == (batch_size, 2048, 16, 16)
        x = self.deconv_layer1(x)
        assert x.size() == (batch_size, 512, 32, 32)

        x = torch.cat((x, out3), 1)
        assert x.size() == (batch_size, 1024, 32, 32)
        x = self.deconv_layer2(x)
        assert x.size() == (batch_size, 256, 64, 64)

        x = torch.cat((x, out2), 1)
        assert x.size() == (batch_size, 512, 64, 64)
        x = self.deconv_layer3(x)
        assert x.size() == (batch_size, 64, 128, 128)

        x = torch.cat((x, out1), 1)
        assert x.size() == (batch_size, 128, 128, 128)
        x = self.deconv_layer4(x)
        x = self.deconv_layer5(x)
        assert x.size() == (batch_size, 1, 256, 256)
        # Drop the singleton channel dimension.
        x = x.squeeze(1)
        assert x.size() == (batch_size, 256, 256)
        return x
class VGGModel(nn.Module):
    """
    Saliency model with a VGG-16 feature encoder and a U-Net-style decoder.

    The asserts in forward() document the expected feature shapes for a
    256x256 input.
    """
    def __init__(self, num_channels=3, train_enc=False, load_weight=1):
        """
        :param num_channels: number of input image channels (stored but not
            otherwise used; kept for interface parity with sibling models).
        :param train_enc: whether encoder weights receive gradients.
        :param load_weight: truthy to load ImageNet-pretrained weights.
        """
        super(VGGModel, self).__init__()

        self.num_channels = num_channels
        self.vgg = models.vgg16(pretrained=bool(load_weight)).features

        for param in self.vgg.parameters():
            param.requires_grad = train_enc

        # Slice the VGG feature stack into five stages ending at skip points.
        self.conv_layer1 = self.vgg[:7]
        self.conv_layer2 = self.vgg[7:12]
        self.conv_layer3 = self.vgg[12:19]
        self.conv_layer4 = self.vgg[19:24]
        self.conv_layer5 = self.vgg[24:]

        self.linear_upsampling = nn.UpsamplingBilinear2d(scale_factor=2)

        # Decoder; input channel counts reflect the skip concatenations.
        self.deconv_layer1 = nn.Sequential(
            nn.Conv2d(in_channels = 1024, out_channels = 512, kernel_size = 3, padding = 1, bias = True),
            nn.ReLU(inplace=True),
            self.linear_upsampling
        )
        self.deconv_layer2 = nn.Sequential(
            nn.Conv2d(in_channels = 1024, out_channels = 256, kernel_size = 3, padding = 1, bias = True),
            nn.ReLU(inplace=True),
            self.linear_upsampling
        )
        self.deconv_layer3 = nn.Sequential(
            nn.Conv2d(in_channels = 512, out_channels = 128, kernel_size = 3, padding = 1, bias = True),
            nn.ReLU(inplace=True),
            self.linear_upsampling
        )
        self.deconv_layer4 = nn.Sequential(
            nn.Conv2d(in_channels = 256, out_channels = 128, kernel_size = 3, padding = 1, bias = True),
            nn.ReLU(inplace=True),
            self.linear_upsampling
        )
        self.deconv_layer5 = nn.Sequential(
            nn.Conv2d(in_channels = 128, out_channels = 128, kernel_size = 3, padding = 1, bias = True),
            nn.ReLU(inplace=True),
            nn.Conv2d(in_channels = 128, out_channels = 1, kernel_size = 3, padding = 1, bias = True),
            nn.Sigmoid()
        )

    def forward(self, images):
        """Return a (batch, 256, 256) saliency map; expects 256x256 input."""
        batch_size = images.size(0)

        out1 = self.conv_layer1(images)
        out2 = self.conv_layer2(out1)
        out3 = self.conv_layer3(out2)
        out4 = self.conv_layer4(out3)
        out5 = self.conv_layer5(out4)
        # The deepest features are upsampled directly (no deconv_layer0 here).
        out5 = self.linear_upsampling(out5)
        assert out5.size() == (batch_size, 512, 16, 16)

        x = torch.cat((out5,out4), 1)
        assert x.size() == (batch_size, 1024, 16, 16)
        x = self.deconv_layer1(x)
        assert x.size() == (batch_size, 512, 32, 32)

        x = torch.cat((x, out3), 1)
        assert x.size() == (batch_size, 1024, 32, 32)
        x = self.deconv_layer2(x)
        assert x.size() == (batch_size, 256, 64, 64)

        x = torch.cat((x, out2), 1)
        assert x.size() == (batch_size, 512, 64, 64)
        x = self.deconv_layer3(x)
        assert x.size() == (batch_size, 128, 128, 128)

        x = torch.cat((x, out1), 1)
        assert x.size() == (batch_size, 256, 128, 128)
        x = self.deconv_layer4(x)
        x = self.deconv_layer5(x)
        assert x.size() == (batch_size, 1, 256, 256)
        # Drop the singleton channel dimension.
        x = x.squeeze(1)
        assert x.size() == (batch_size, 256, 256)
        return x
class MobileNetV2(nn.Module):
    """
    Saliency model with a MobileNetV2 encoder (via torch.hub) and a
    U-Net-style decoder.

    Fix: ``load_weight`` was previously ignored — the hub model was always
    loaded with ``pretrained=True``. It now honors ``load_weight`` exactly
    like the sibling models (``pretrained=bool(load_weight)``).
    """
    def __init__(self, num_channels=3, train_enc=False, load_weight=1):
        """
        :param num_channels: number of input image channels (unused here;
            kept for interface parity with the sibling models).
        :param train_enc: whether encoder weights receive gradients.
        :param load_weight: truthy to load ImageNet-pretrained weights.
        """
        super(MobileNetV2, self).__init__()

        # pretrained now follows load_weight, consistent with the other models.
        self.mobilenet = torch.hub.load('pytorch/vision:v0.4.0', 'mobilenet_v2', pretrained=bool(load_weight)).features

        for param in self.mobilenet.parameters():
            param.requires_grad = train_enc

        self.linear_upsampling = nn.UpsamplingBilinear2d(scale_factor=2)

        # Slice the feature stack into five stages ending at skip points.
        self.conv_layer1 = self.mobilenet[:2]
        self.conv_layer2 = self.mobilenet[2:4]
        self.conv_layer3 = self.mobilenet[4:7]
        self.conv_layer4 = self.mobilenet[7:14]
        self.conv_layer5 = self.mobilenet[14:]

        # Decoder; input channel counts reflect the skip concatenations.
        self.deconv_layer0 = nn.Sequential(
            nn.Conv2d(in_channels = 1280, out_channels = 96, kernel_size=3, padding=1, bias = True),
            nn.ReLU(inplace=True),
            self.linear_upsampling
        )
        self.deconv_layer1 = nn.Sequential(
            nn.Conv2d(in_channels = 96+96, out_channels = 32, kernel_size = 3, padding = 1, bias = True),
            nn.ReLU(inplace=True),
            self.linear_upsampling
        )
        self.deconv_layer2 = nn.Sequential(
            nn.Conv2d(in_channels = 32+32, out_channels = 24, kernel_size = 3, padding = 1, bias = True),
            nn.ReLU(inplace=True),
            self.linear_upsampling
        )
        self.deconv_layer3 = nn.Sequential(
            nn.Conv2d(in_channels = 24+24, out_channels = 16, kernel_size = 3, padding = 1, bias = True),
            nn.ReLU(inplace=True),
            self.linear_upsampling
        )
        self.deconv_layer4 = nn.Sequential(
            nn.Conv2d(in_channels = 16+16, out_channels = 16, kernel_size = 3, padding = 1, bias = True),
            nn.ReLU(inplace=True),
            self.linear_upsampling
        )
        self.deconv_layer5 = nn.Sequential(
            nn.Conv2d(in_channels = 16, out_channels = 16, kernel_size = 3, padding = 1, bias = True),
            nn.ReLU(inplace=True),
            nn.Conv2d(in_channels = 16, out_channels = 1, kernel_size = 3, padding = 1, bias = True),
            nn.Sigmoid()
        )

    def forward(self, images):
        """Return a (batch, 256, 256) saliency map; expects 256x256 input."""
        batch_size = images.size(0)

        out1 = self.conv_layer1(images)
        out2 = self.conv_layer2(out1)
        out3 = self.conv_layer3(out2)
        out4 = self.conv_layer4(out3)
        out5 = self.conv_layer5(out4)

        # Shape contracts for a 256x256 input.
        assert out1.size() == (batch_size, 16, 128, 128)
        assert out2.size() == (batch_size, 24, 64, 64)
        assert out3.size() == (batch_size, 32, 32, 32)
        assert out4.size() == (batch_size, 96, 16, 16)
        assert out5.size() == (batch_size, 1280, 8, 8)

        # Decoder with skip concatenations, deepest features first.
        out5 = self.deconv_layer0(out5)
        x = torch.cat((out5,out4), 1)
        x = self.deconv_layer1(x)
        x = torch.cat((x,out3), 1)
        x = self.deconv_layer2(x)
        x = torch.cat((x,out2), 1)
        x = self.deconv_layer3(x)
        x = torch.cat((x,out1), 1)
        x = self.deconv_layer4(x)
        x = self.deconv_layer5(x)
        # Drop the singleton channel dimension.
        x = x.squeeze(1)
        return x
acf0aac7c13f707580ca3e502c1166c59e4938f5 | 1,073 | py | Python | setup.py | SfinxCZ/dask-lightgbm | a219cb3d8b90d734bb58acf9c1548338c1f32e05 | [
"BSD-3-Clause"
] | 6 | 2018-10-22T06:45:02.000Z | 2020-03-02T03:20:48.000Z | setup.py | SfinxCZ/dask-lightgbm | a219cb3d8b90d734bb58acf9c1548338c1f32e05 | [
"BSD-3-Clause"
] | null | null | null | setup.py | SfinxCZ/dask-lightgbm | a219cb3d8b90d734bb58acf9c1548338c1f32e05 | [
"BSD-3-Clause"
] | 2 | 2018-10-16T06:48:11.000Z | 2018-10-22T06:45:03.000Z | #!/usr/bin/env python
import io
import os
from setuptools import setup
install_requires = [
    'numpy>=1.17.3',
    'lightgbm>=2.3.0',
    'dask>=2.6.0',
    'distributed>=2.6.0',
    'toolz>=0.10.0'
]

extras_require = {
    'dev': [
        'pytest>=5.2.2',
        'pandas>=0.25.3',
        'dask[dataframe]',
        'dask-ml>=1.1.1',
        'requests>=2.22.0',
        'fsspec>=0.5.2',
        'scikit-learn>=0.21.3'
    ],
    'sparse': [
        'sparse==0.5.0',
        'scipy>=1.3.1'
    ]
}

# Read the long description from README.md when building from a checkout
# (sdists may not include it). Fix: use a context manager so the file handle
# is closed deterministically instead of leaking until GC.
if os.path.exists('README.md'):
    with io.open('README.md', encoding='utf-8') as readme:
        long_description = readme.read()
else:
    long_description = ''

setup(name='dask-lightgbm',
      version='0.1.0.dev0',
      description='Interactions between Dask and LightGBM',
      long_description=long_description,
      long_description_content_type='text/markdown',
      license='BSD',
      url='https://github.com/dask/dask-lightgbm',
      install_requires=install_requires,
      extras_require=extras_require,
      packages=['dask_lightgbm'],
      include_package_data=True,
      zip_safe=False)
acf0aad2e7ff1eed3253be936dd1bd836249e636 | 19,277 | py | Python | src/peter_sslers/web/lib/forms.py | jvanasco/pyramid_letsencrypt_admin | 6db37d30ef8028ff978bf6083cdf978fc88a4782 | [
"MIT"
] | 1 | 2016-03-31T22:19:19.000Z | 2016-03-31T22:19:19.000Z | src/peter_sslers/web/lib/forms.py | jvanasco/pyramid_letsencrypt_admin | 6db37d30ef8028ff978bf6083cdf978fc88a4782 | [
"MIT"
] | null | null | null | src/peter_sslers/web/lib/forms.py | jvanasco/pyramid_letsencrypt_admin | 6db37d30ef8028ff978bf6083cdf978fc88a4782 | [
"MIT"
] | null | null | null | # pypi
from formencode import Schema as _Schema
from formencode.foreach import ForEach
from formencode.validators import _
from formencode.validators import Email
from formencode.validators import FieldStorageUploadConverter
from formencode.validators import FormValidator
from formencode.validators import Int
from formencode.validators import Invalid
from formencode.validators import OneOf
from formencode.validators import RequireIfMissing
from formencode.validators import UnicodeString
# local app
from ...model import utils as model_utils
# ==============================================================================
class OnlyOneOf(FormValidator):
    """
    FormEncode chained validator enforcing that at most one (and, when
    ``not_empty`` is set, exactly one) of the linked fields carries a value.
    """
    # Fields of which only one is allowed to be non-empty.
    only_one_ofs = None
    # When truthy, exactly one of the fields must be supplied (zero is an error).
    not_empty = None
    # Lets callers write OnlyOneOf(["a", "b"], ...) positionally.
    __unpackargs__ = ("only_one_ofs",)
    messages = {
        "empty": _("You must submit one and only one of these linked fields."),
        "invalid": _("You may submit only one of these linked fields."),
    }

    def _convert_to_python(self, value_dict, state):
        """Validate the whole form dict; return it unchanged when valid.

        Raises Invalid with a per-field error_dict so each linked field is
        flagged in the rendered form.
        """
        is_empty = self.field_is_empty
        # One boolean per linked field: True when that field has a value.
        presence = [not is_empty(value_dict.get(field)) for field in self.only_one_ofs]
        total_present = presence.count(True)
        if not total_present and self.not_empty:
            # Zero fields supplied but one is required.
            raise Invalid(
                _("You must provide a value for one of the fields: %s")
                % ", ".join(["`%s`" % field for field in self.only_one_ofs]),
                value_dict,
                state,
                error_dict=dict(
                    [
                        (
                            field,
                            Invalid(
                                self.message("empty", state),
                                value_dict.get(field),
                                state,
                            ),
                        )
                        for field in self.only_one_ofs
                    ]
                ),
            )
        if total_present > 1:
            # More than one field supplied; the fields are mutually exclusive.
            raise Invalid(
                _("You may only provide a value for one of the fields: %s")
                % ", ".join(["`%s`" % field for field in self.only_one_ofs]),
                value_dict,
                state,
                error_dict=dict(
                    [
                        (
                            field,
                            Invalid(
                                self.message("invalid", state),
                                value_dict.get(field),
                                state,
                            ),
                        )
                        for field in self.only_one_ofs
                    ]
                ),
            )
        return value_dict
# ==============================================================================
class _Form_Schema_Base(_Schema):
    """Base schema: tolerate extra submitted fields but drop them from results."""

    allow_extra_fields = True
    filter_extra_fields = True
class _form_AcmeAccount_core(_Form_Schema_Base):
    """Core fields for selecting or uploading an ACME account key."""

    # `account_key_file` could indicate `account_key_file_pem` or the combo of certbot encoding
    account_key_option = OneOf(
        model_utils.AcmeAccontKey_options_a,
        not_empty=True,
    )
    account_key_global_default = UnicodeString(not_empty=False, if_missing=None)
    account_key_existing = UnicodeString(not_empty=False, if_missing=None)

    # these are via Form_AcmeAccount_new__file; either a single PEM upload or
    # the certbot triplet (meta/pkey/reg) below.
    account_key_file_pem = FieldStorageUploadConverter(not_empty=False, if_missing=None)
    account_key_file_le_meta = FieldStorageUploadConverter(
        not_empty=False, if_missing=None
    )
    account_key_file_le_pkey = FieldStorageUploadConverter(
        not_empty=False, if_missing=None
    )
    account_key_file_le_reg = FieldStorageUploadConverter(
        not_empty=False, if_missing=None
    )
    acme_account_provider_id = Int(not_empty=False, if_missing=None)
class _form_PrivateKey_core(_Form_Schema_Base):
    """Core fields for selecting or uploading a certificate PrivateKey."""

    private_key_option = OneOf(
        model_utils.PrivateKey_options_a,
        not_empty=True,
    )
    private_key_existing = UnicodeString(not_empty=False, if_missing=None)
    private_key_file_pem = FieldStorageUploadConverter(not_empty=False, if_missing=None)
class _form_AcmeAccount_reuse(_form_AcmeAccount_core):
    """Account-key selection that additionally allows reusing a prior key."""

    # Widened option set (`_b`) includes the "reuse" choice.
    account_key_option = OneOf(
        model_utils.AcmeAccontKey_options_b,
        not_empty=True,
    )
    account_key_reuse = UnicodeString(not_empty=False, if_missing=None)
class _form_PrivateKey_reuse(_form_PrivateKey_core):
    """PrivateKey selection that additionally allows reusing a prior key."""

    # Widened option set (`_b`) includes the "reuse" choice.
    private_key_option = OneOf(model_utils.PrivateKey_options_b, not_empty=True)
    private_key_reuse = UnicodeString(not_empty=False, if_missing=None)
class _form_AcmeAccount_PrivateKey_core(_Form_Schema_Base):
    """Combined AcmeAccount + PrivateKey selection fields.

    This is a mix of two forms (_form_AcmeAccount_core and
    _form_PrivateKey_core), because FormEncode doesn't support multiple
    class inheritance.
    """

    # NOTE(review): this literal tuple looks equivalent to
    # `model_utils.AcmeAccontKey_options_a` used by _form_AcmeAccount_core —
    # confirm and consolidate if so.
    account_key_option = OneOf(
        ("account_key_global_default", "account_key_existing", "account_key_file"),
        not_empty=True,
    )
    account_key_global_default = UnicodeString(not_empty=False, if_missing=None)
    account_key_existing = UnicodeString(not_empty=False, if_missing=None)
    account__contact = Email(not_empty=False, if_missing=None)  # required if key_pem

    # this is the `private_key_cycle` of the AcmeAccount
    account__private_key_cycle = OneOf(
        model_utils.PrivateKeyCycle._options_AcmeAccount_private_key_cycle,
        not_empty=True,
    )

    # this is the `private_key_technology` of the AcmeAccount
    # this is not required on Upload, only New
    account__private_key_technology = OneOf(
        model_utils.KeyTechnology._options_AcmeAccount_private_key_technology,
        not_empty=False,
        if_missing=None,
    )

    # these are via Form_AcmeAccount_new__file; either a single PEM upload or
    # the certbot triplet (meta/pkey/reg) below.
    account_key_file_pem = FieldStorageUploadConverter(not_empty=False, if_missing=None)
    account_key_file_le_meta = FieldStorageUploadConverter(
        not_empty=False, if_missing=None
    )
    account_key_file_le_pkey = FieldStorageUploadConverter(
        not_empty=False, if_missing=None
    )
    account_key_file_le_reg = FieldStorageUploadConverter(
        not_empty=False, if_missing=None
    )
    acme_account_provider_id = Int(not_empty=False, if_missing=None)

    private_key_option = OneOf(
        model_utils.PrivateKey_options_a,
        not_empty=True,
    )
    private_key_existing = UnicodeString(not_empty=False, if_missing=None)
    private_key_file_pem = FieldStorageUploadConverter(not_empty=False, if_missing=None)
class _form_AcmeAccount_PrivateKey_reuse(_form_AcmeAccount_PrivateKey_core):
    """Combined AcmeAccount + PrivateKey selection, with "reuse" options.

    This is a mix of two forms (the `_reuse` variants), because FormEncode
    doesn't support multiple class inheritance.
    """

    # Widened option sets (`_b`) include the "reuse" choices.
    account_key_option = OneOf(
        model_utils.AcmeAccontKey_options_b,
        not_empty=True,
    )
    account_key_reuse = UnicodeString(not_empty=False, if_missing=None)

    private_key_option = OneOf(
        model_utils.PrivateKey_options_b,
        not_empty=True,
    )
    private_key_reuse = UnicodeString(not_empty=False, if_missing=None)
class Form_AcmeAccount_edit(_Form_Schema_Base):
    """Edit an existing AcmeAccount's key-cycle and key-technology settings."""

    # this is the `private_key_cycle` of the AcmeAccount
    account__private_key_cycle = OneOf(
        model_utils.PrivateKeyCycle._options_AcmeAccount_private_key_cycle,
        not_empty=True,
    )

    # this is the `private_key_technology` of the AcmeAccount
    account__private_key_technology = OneOf(
        model_utils.KeyTechnology._options_AcmeAccount_private_key_technology,
        not_empty=True,
    )
class Form_AcmeAccount_new__auth(_Form_Schema_Base):
    """Create a new AcmeAccount by registering against an ACME provider."""

    acme_account_provider_id = Int(not_empty=True, if_missing=None)
    account__contact = Email(not_empty=True, if_missing=None)  # use it or don't

    # this is the `private_key_cycle` of the AcmeAccount
    account__private_key_cycle = OneOf(
        model_utils.PrivateKeyCycle._options_AcmeAccount_private_key_cycle,
        not_empty=True,
    )

    # this is the `private_key_technology` of the AcmeAccount
    account__private_key_technology = OneOf(
        model_utils.KeyTechnology._options_AcmeAccount_private_key_technology,
        not_empty=True,
    )
class Form_AcmeAccount_new__file(_Form_Schema_Base):
    """Create a new AcmeAccount from uploaded key material.

    Accepts either a single PEM file (plus provider id and contact) or the
    certbot/LetsEncrypt triplet (meta/pkey/reg).
    copied into a few other forms
    * Form_AcmeOrder_new_freeform
    """

    account__contact = Email(not_empty=False, if_missing=None)  # required if key_pem

    # this is the `private_key_cycle` of the AcmeAccount
    account__private_key_cycle = OneOf(
        model_utils.PrivateKeyCycle._options_AcmeAccount_private_key_cycle,
        not_empty=True,
    )

    # this is the `private_key_technology` of the AcmeAccount
    # this is not required on Upload, only New
    account__private_key_technology = OneOf(
        model_utils.KeyTechnology._options_AcmeAccount_private_key_technology,
        not_empty=False,
        if_missing=None,
    )

    # if this isn't provided...
    account_key_file_pem = FieldStorageUploadConverter(not_empty=False, if_missing=None)
    acme_account_provider_id = Int(not_empty=False, if_missing=None)

    # require all of these...
    account_key_file_le_meta = FieldStorageUploadConverter(
        not_empty=False, if_missing=None
    )
    account_key_file_le_pkey = FieldStorageUploadConverter(
        not_empty=False, if_missing=None
    )
    account_key_file_le_reg = FieldStorageUploadConverter(
        not_empty=False, if_missing=None
    )
# Schema: toggle an AcmeAccount's status flags.
class Form_AcmeAccount_mark(_Form_Schema_Base):
    action = OneOf(("global_default", "active", "inactive"), not_empty=True)
# Schema: deactivate an AcmeAccount identified by its PEM key.
class Form_AcmeAccount_deactivate(_Form_Schema_Base):
    key_pem = UnicodeString(not_empty=True)
# Schema: rotate an AcmeAccount's key; caller supplies the current PEM.
class Form_AcmeAccount_key_change(_Form_Schema_Base):
    key_pem_existing = UnicodeString(not_empty=True)
# Schema: deactivate a batch of AcmeAuthorizations by id.
class Form_AcmeAccount_deactivate_authorizations(_Form_Schema_Base):
    acme_authorization_id = ForEach(Int())
# Schema: register a new acme-dns server by its root URL.
class Form_AcmeDnsServer_new(_Form_Schema_Base):
    root_url = UnicodeString(not_empty=True)
# Schema: toggle an acme-dns server's status flags.
class Form_AcmeDnsServer_mark(_Form_Schema_Base):
    action = OneOf(
        (
            "active",
            "inactive",
            "global_default",
        ),
        not_empty=True,
    )
# Schema: change an acme-dns server's root URL.
class Form_AcmeDnsServer_edit(_Form_Schema_Base):
    root_url = UnicodeString(not_empty=True)
# Schema: ensure acme-dns entries exist for a list of domains.
class Form_AcmeDnsServer_ensure_domains(_Form_Schema_Base):
    domain_names = UnicodeString(not_empty=True)
# Schema: import an existing acme-dns registration for one domain.
class Form_AcmeDnsServer_import_domain(_Form_Schema_Base):
    domain_name = UnicodeString(not_empty=True)
    # acme-dns fields:
    username = UnicodeString(not_empty=True)
    password = UnicodeString(not_empty=True)
    fulldomain = UnicodeString(not_empty=True)
    subdomain = UnicodeString(not_empty=True)
    allowfrom = UnicodeString(not_empty=False, if_missing=None)
# Schema: create a new AcmeOrder from free-form domain input.
# At least one of the http-01 / dns-01 domain lists must be supplied
# (enforced by the paired RequireIfMissing validators below).
class Form_AcmeOrder_new_freeform(_form_AcmeAccount_PrivateKey_core):
    domain_names_http01 = UnicodeString(not_empty=False, if_missing=None)
    domain_names_dns01 = UnicodeString(not_empty=False, if_missing=None)
    processing_strategy = OneOf(
        model_utils.AcmeOrder_ProcessingStrategy.OPTIONS_ALL,
        not_empty=True,
    )
    # this is the `private_key_cycle` of the AcmeOrder renewals
    private_key_cycle__renewal = OneOf(
        model_utils.PrivateKeyCycle._options_AcmeOrder_private_key_cycle,
        not_empty=True,
    )
    chained_validators = [
        RequireIfMissing("domain_names_http01", missing="domain_names_dns01"),
        RequireIfMissing("domain_names_dns01", missing="domain_names_http01"),
    ]
# Schema: quick renewal — only the processing strategy is selectable.
class Form_AcmeOrder_renew_quick(_Form_Schema_Base):
    processing_strategy = OneOf(
        model_utils.AcmeOrder_ProcessingStrategy.OPTIONS_ALL,
        not_empty=True,
    )
# Schema: custom renewal — account/key selection comes from the base class.
class Form_AcmeOrder_renew_custom(_form_AcmeAccount_PrivateKey_reuse):
    processing_strategy = OneOf(
        model_utils.AcmeOrder_ProcessingStrategy.OPTIONS_ALL,
        not_empty=True,
    )
    # this is the `private_key_cycle` of the AcmeOrder renewals
    private_key_cycle__renewal = OneOf(
        model_utils.PrivateKeyCycle._options_AcmeOrder_private_key_cycle,
        not_empty=True,
    )
# Schema: start an "orderless" flow; at least one challenge domain list
# is required (paired RequireIfMissing below); the account key is optional.
class Form_AcmeOrderless_new(_form_AcmeAccount_core):
    domain_names_http01 = UnicodeString(not_empty=False, if_missing=None)
    domain_names_dns01 = UnicodeString(not_empty=False, if_missing=None)
    account_key_option = OneOf(
        (
            "none",
            "account_key_global_default",
            "account_key_existing",
            "account_key_file",
        ),
        not_empty=False,
    )
    chained_validators = [
        RequireIfMissing("domain_names_http01", missing="domain_names_dns01"),
        RequireIfMissing("domain_names_dns01", missing="domain_names_http01"),
    ]
# Schema: update the challenge payload for one orderless domain.
class Form_AcmeOrderless_manage_domain(_Form_Schema_Base):
    challenge_key = UnicodeString(not_empty=True)
    challenge_text = UnicodeString(not_empty=True)
# Schema: attach an additional challenge to an orderless flow.
class Form_AcmeOrderless_AcmeChallenge_add(_Form_Schema_Base):
    acme_challenge_type = OneOf(
        model_utils.AcmeChallengeType._OPTIONS_AcmeOrderless_AddChallenge,
        not_empty=True,
    )
    domain = UnicodeString(not_empty=True)
    token = UnicodeString(not_empty=False, if_missing=None)
    keyauthorization = UnicodeString(not_empty=False, if_missing=None)
    challenge_url = UnicodeString(not_empty=False, if_missing=None)
# Schema: API endpoint — enable a list of domains.
class Form_API_Domain_enable(_Form_Schema_Base):
    domain_names = UnicodeString(not_empty=True)
# Schema: API endpoint — disable a list of domains.
class Form_API_Domain_disable(_Form_Schema_Base):
    domain_names = UnicodeString(not_empty=True)
# Schema: API endpoint — autocert a single domain.
class Form_API_Domain_autocert(_Form_Schema_Base):
    domain_name = UnicodeString(not_empty=True)
# Schema: API endpoint — order a certificate for a domain only if one is
# needed; note the strategy choices are restricted to OPTIONS_IMMEDIATE.
class Form_API_Domain_certificate_if_needed(_form_AcmeAccount_PrivateKey_core):
    domain_name = UnicodeString(not_empty=True)
    processing_strategy = OneOf(
        model_utils.AcmeOrder_ProcessingStrategy.OPTIONS_IMMEDIATE,
        not_empty=True,
    )
    # this is the `private_key_cycle` of the AcmeOrder renewals
    private_key_cycle__renewal = OneOf(
        model_utils.PrivateKeyCycle._options_AcmeOrder_private_key_cycle,
        not_empty=True,
    )
# Schema: add a CertificateCA to the preference list by SHA1 fingerprint.
class Form_CertificateCAPreference__add(_Form_Schema_Base):
    fingerprint_sha1 = UnicodeString(not_empty=True)
# Schema: remove a preference entry; slot + fingerprint act as a
# consistency check against concurrent edits.
class Form_CertificateCAPreference__delete(_Form_Schema_Base):
    slot = Int(not_empty=True)
    fingerprint_sha1 = UnicodeString(not_empty=True)
# Schema: move a preference entry up or down one slot.
class Form_CertificateCAPreference__prioritize(_Form_Schema_Base):
    slot = Int(not_empty=True)
    fingerprint_sha1 = UnicodeString(not_empty=True)
    priority = OneOf(("increase", "decrease"), not_empty=True)
# Schema: upload a CertificateCA certificate file.
class Form_CertificateCA_Upload_Cert__file(_Form_Schema_Base):
    cert_file = FieldStorageUploadConverter(not_empty=True)
    cert_file_name = UnicodeString(not_empty=False, if_missing=None)
# Schema: upload a CertificateCA chain file.
class Form_CertificateCAChain_Upload__file(_Form_Schema_Base):
    chain_file = FieldStorageUploadConverter(not_empty=True)
    chain_file_name = UnicodeString(not_empty=False, if_missing=None)
# Schema: upload a full certificate bundle (key + cert + chain).
class Form_Certificate_Upload__file(_Form_Schema_Base):
    private_key_file_pem = FieldStorageUploadConverter(not_empty=True)
    certificate_file = FieldStorageUploadConverter(not_empty=True)
    chain_file = FieldStorageUploadConverter(not_empty=True)
# Schema: change the status of a signed certificate.
class Form_CertificateSigned_mark(_Form_Schema_Base):
    action = OneOf(
        (
            "active",
            "inactive",
            "revoked",
            # "renew_manual",
            # "renew_auto",
            "unrevoke",
        ),
        not_empty=True,
    )
# Schema: record a resolution for a CoverageAssuranceEvent.
class Form_CoverageAssuranceEvent_mark(_Form_Schema_Base):
    # BUG FIX: the original passed `("resolution")`, which is just the
    # string "resolution" — without a trailing comma no tuple is built.
    # formencode's OneOf tests membership with `value in list`, and for a
    # string operand that is a *substring* test, so e.g. "solu" validated.
    # A 1-tuple restores exact-match semantics.
    action = OneOf(
        ("resolution",),
        not_empty=True,
    )
    resolution = OneOf(
        model_utils.CoverageAssuranceResolution.OPTIONS_ALL, not_empty=True
    )
# Schema: register a new domain.
class Form_Domain_new(_Form_Schema_Base):
    domain_name = UnicodeString(not_empty=True)
# Schema: toggle a domain's active flag.
class Form_Domain_mark(_Form_Schema_Base):
    action = OneOf(("active", "inactive"), not_empty=True)
# Schema: search for a domain by name.
class Form_Domain_search(_Form_Schema_Base):
    domain = UnicodeString(not_empty=True)
# Schema: associate a domain with an acme-dns server.
class Form_Domain_AcmeDnsServer_new(_Form_Schema_Base):
    acme_dns_server_id = Int(not_empty=True)
# Schema: autogenerate a private key; only 4096-bit is offered.
class Form_PrivateKey_new__autogenerate(_Form_Schema_Base):
    bits = OneOf(("4096",), not_empty=True)
# Schema: create a private key from pasted PEM or an uploaded file —
# exactly one of the two sources (OnlyOneOf below).
class Form_PrivateKey_new__full(_Form_Schema_Base):
    private_key = UnicodeString(not_empty=False, if_missing=None)
    private_key_file_pem = FieldStorageUploadConverter(not_empty=False, if_missing=None)
    chained_validators = [
        OnlyOneOf(("private_key", "private_key_file_pem"), not_empty=True)
    ]
# Schema: create a private key from an uploaded PEM file only.
class Form_PrivateKey_new__file(_Form_Schema_Base):
    private_key_file_pem = FieldStorageUploadConverter(not_empty=True)
# Schema: change a private key's status.
class Form_PrivateKey_mark(_Form_Schema_Base):
    action = OneOf(
        (
            "compromised",
            "active",
            "inactive",
        ),
        not_empty=True,
    )
# Schema: queue a certificate from free-form domain input; at least one
# of the http-01 / dns-01 lists is required (RequireIfMissing pair).
class Form_QueueCertificate_new_freeform(_form_AcmeAccount_PrivateKey_reuse):
    # this is the `private_key_cycle` of the AcmeOrder renewals
    private_key_cycle__renewal = OneOf(
        model_utils.PrivateKeyCycle._options_AcmeOrder_private_key_cycle,
        not_empty=True,
    )
    domain_names_http01 = UnicodeString(not_empty=False, if_missing=None)
    domain_names_dns01 = UnicodeString(not_empty=False, if_missing=None)
    chained_validators = [
        RequireIfMissing("domain_names_http01", missing="domain_names_dns01"),
        RequireIfMissing("domain_names_dns01", missing="domain_names_http01"),
    ]
# Schema: queue a certificate based on an existing record; exactly one of
# acme_order / certificate_signed / unique_fqdn_set must be given
# (OnlyOneOf below), matching the declared queue_source.
class Form_QueueCertificate_new_structured(_form_AcmeAccount_PrivateKey_reuse):
    queue_source = OneOf(
        (
            "AcmeOrder",
            "CertificateSigned",
            "UniqueFQDNSet",
        ),
        not_empty=True,
    )
    acme_order = Int(not_empty=False, if_missing=None)
    certificate_signed = Int(not_empty=False, if_missing=None)
    unique_fqdn_set = Int(not_empty=False, if_missing=None)
    # this is the `private_key_cycle` of the AcmeOrder renewals
    private_key_cycle__renewal = OneOf(
        model_utils.PrivateKeyCycle._options_AcmeOrder_private_key_cycle,
        not_empty=True,
    )
    chained_validators = [
        OnlyOneOf(
            ("acme_order", "certificate_signed", "unique_fqdn_set"), not_empty=True
        )
    ]
# Schema: cancel a queued certificate.
class Form_QueueCertificate_mark(_Form_Schema_Base):
    action = OneOf(("cancel",), not_empty=True)
# Schema: add domains to the processing queue (http-01 only).
class Form_QueueDomains_add(_Form_Schema_Base):
    domain_names_http01 = UnicodeString(not_empty=True)
# Schema: cancel a queued domain.
class Form_QueueDomain_mark(_Form_Schema_Base):
    action = OneOf(("cancel",), not_empty=True)
# Schema: process queued domains into orders, batching up to 100 domains
# per certificate.
class Form_QueueDomains_process(_form_AcmeAccount_PrivateKey_core):
    """just use the PrivateKey and AcmeAccount in the parent class"""
    max_domains_per_certificate = Int(not_empty=True, max=100, min=1)
    processing_strategy = OneOf(
        model_utils.AcmeOrder_ProcessingStrategy.OPTIONS_ALL,
        not_empty=True,
    )
    # this is the `private_key_cycle` of the AcmeOrder renewals
    private_key_cycle__renewal = OneOf(
        model_utils.PrivateKeyCycle._options_AcmeOrder_private_key_cycle,
        not_empty=True,
    )
# Schema: add/remove domains on an existing UniqueFQDNSet.
class Form_UniqueFQDNSet_modify(_Form_Schema_Base):
    domain_names_add = UnicodeString(not_empty=False)
    domain_names_del = UnicodeString(not_empty=False)
# Schema: create a UniqueFQDNSet from a list of domains.
class Form_UniqueFQDNSet_new(_Form_Schema_Base):
    domain_names = UnicodeString(not_empty=True)
| 32.728353 | 99 | 0.708668 |
acf0ad40fa58f5e00b8a76295e2bb215657f90bb | 623 | py | Python | tests/test_db.py | Steve-D-Eckles/mountain-of-madness-flask-edition | dc14f052592774576d983de99f5106a468d2a77b | [
"MIT"
] | null | null | null | tests/test_db.py | Steve-D-Eckles/mountain-of-madness-flask-edition | dc14f052592774576d983de99f5106a468d2a77b | [
"MIT"
] | 2 | 2020-03-29T15:25:39.000Z | 2020-03-29T15:27:52.000Z | tests/test_db.py | Steve-D-Eckles/mountain-of-madness-flask-edition | dc14f052592774576d983de99f5106a468d2a77b | [
"MIT"
] | null | null | null | import sqlite3
import pytest
from madness.db import get_db
def test_get_close_db(app):
with app.app_context():
db = get_db()
assert db is get_db()
with pytest.raises(sqlite3.ProgrammingError) as e:
db.execute('SELECT 1')
assert 'closed' in str(e.value)
def test_init_db_command(runner, monkeypatch):
class Recorder(object):
called = False
def fake_init_db():
Recorder.called = True
monkeypatch.setattr('madness.db.init_db', fake_init_db)
result = runner.invoke(args=['init-db'])
assert 'Initialized' in result.output
assert Recorder.called
| 23.074074 | 59 | 0.680578 |
acf0aedbd6f15c55585e30f682792e9d8ac4af52 | 97 | py | Python | swigwin-3.0.12/Examples/python/import_packages/namespace_pkg/nonpkg.py | bostich83/atomic_swig | 5438f676d690ffddb09d88bbc1b51e3b38fa8c6a | [
"MIT"
] | null | null | null | swigwin-3.0.12/Examples/python/import_packages/namespace_pkg/nonpkg.py | bostich83/atomic_swig | 5438f676d690ffddb09d88bbc1b51e3b38fa8c6a | [
"MIT"
] | 2 | 2020-03-24T18:19:22.000Z | 2020-03-31T11:22:32.000Z | swigwin-3.0.12/Examples/python/import_packages/namespace_pkg/nonpkg.py | bostich83/atomic_swig | 5438f676d690ffddb09d88bbc1b51e3b38fa8c6a | [
"MIT"
] | 1 | 2020-06-18T12:09:05.000Z | 2020-06-18T12:09:05.000Z | # import robin as a module in the global namespace
import robin
assert(robin.run() == "AWAY!")
| 16.166667 | 50 | 0.71134 |
acf0af214d1c6e54ee8e0c2cddb2bba0391a00a3 | 614 | py | Python | pdm/data_processors/text.py | biniow/pdm | b9171c6ab97ad50a46938a25a4cb3f04014c0787 | [
"MIT"
] | null | null | null | pdm/data_processors/text.py | biniow/pdm | b9171c6ab97ad50a46938a25a4cb3f04014c0787 | [
"MIT"
] | null | null | null | pdm/data_processors/text.py | biniow/pdm | b9171c6ab97ad50a46938a25a4cb3f04014c0787 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""Plugin for text format support"""
def write_text_output(data, output_file_path, verbose=False):
"""
Function writes data to text file
:param data: set of _PersonalData class objects
:param output_file_path: path to output file
:param verbose: defines if verbose mode
"""
result = []
for data_record in data:
result.append(str(data_record))
with open(output_file_path, 'w') as result_file:
txt_output = '\n====================\n'.join(result)
result_file.write(txt_output)
if verbose:
print(txt_output)
| 26.695652 | 61 | 0.628664 |
acf0b047901826e0b98cc955a796edb8fb0c08d0 | 367 | py | Python | objectdanclas.py | yanajuliana15/belajar-python | 7d63b0852d1510933bd4e3e21a384d2011d281f5 | [
"DOC"
] | null | null | null | objectdanclas.py | yanajuliana15/belajar-python | 7d63b0852d1510933bd4e3e21a384d2011d281f5 | [
"DOC"
] | null | null | null | objectdanclas.py | yanajuliana15/belajar-python | 7d63b0852d1510933bd4e3e21a384d2011d281f5 | [
"DOC"
] | null | null | null | class Employee:
'Common base class for all employees'
empCount = 0
def __init__(self, name, salary):
self.name = name
self.salary = salary
Employee.empCount += 1
def displayCount(self):
print ("Total Employee %d" % Employee.empCount)
def displayEmployee(self):
print ("Name : ", self.name, ", Salary: ", self.salary) | 26.214286 | 62 | 0.629428 |
acf0b086aff64ea1c3341ec235a3a024ae527e1c | 67,150 | py | Python | lib/matplotlib/contour.py | jbbrokaw/matplotlib | 86ec1b6fc5628bfb2d09797c58d7eed0ca8c2427 | [
"MIT",
"BSD-3-Clause"
] | 1 | 2015-01-26T14:15:23.000Z | 2015-01-26T14:15:23.000Z | lib/matplotlib/contour.py | jbbrokaw/matplotlib | 86ec1b6fc5628bfb2d09797c58d7eed0ca8c2427 | [
"MIT",
"BSD-3-Clause"
] | 2 | 2021-05-10T17:57:41.000Z | 2021-07-26T16:23:09.000Z | lib/matplotlib/contour.py | jbbrokaw/matplotlib | 86ec1b6fc5628bfb2d09797c58d7eed0ca8c2427 | [
"MIT",
"BSD-3-Clause"
] | 1 | 2015-12-21T07:24:54.000Z | 2015-12-21T07:24:54.000Z | """
These are classes to support contour plotting and
labelling for the axes class
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
from six.moves import xrange
import warnings
import matplotlib as mpl
import numpy as np
from numpy import ma
import matplotlib._cntr as _cntr
import matplotlib.path as mpath
import matplotlib.ticker as ticker
import matplotlib.cm as cm
import matplotlib.colors as colors
import matplotlib.collections as mcoll
import matplotlib.font_manager as font_manager
import matplotlib.text as text
import matplotlib.cbook as cbook
import matplotlib.mlab as mlab
import matplotlib.mathtext as mathtext
import matplotlib.patches as mpatches
import matplotlib.texmanager as texmanager
import matplotlib.transforms as mtrans
# Import needed for adding manual selection capability to clabel
from matplotlib.blocking_input import BlockingContourLabeler
# We can't use a single line collection for contour because a line
# collection can have only a single line style, and we want to be able to have
# dashed negative contours, for example, and solid positive contours.
# We could use a single polygon collection for filled contours, but it
# seems better to keep line and filled contours similar, with one collection
# per level.
class ClabelText(text.Text):
    """
    Text subclass whose ``get_rotation`` maps the stored angle (given in
    data coordinates, or whatever transform is set) into the matching
    angle in pixel coordinates at query time.
    """
    def get_rotation(self):
        # The base class stores the rotation in the artist's own
        # (typically data) coordinate system.
        data_angle = text.Text.get_rotation(self)
        position = np.array([self.get_position()])
        # Map the angle through the artist's transform at its position.
        screen_angles = self.get_transform().transform_angles(
            np.array([data_angle]), position)
        return screen_angles[0]
class ContourLabeler(object):
"""Mixin to provide labelling capability to ContourSet"""
    def clabel(self, *args, **kwargs):
        """
        Label a contour plot.

        Call signature::

          clabel(cs, **kwargs)

        Adds labels to line contours in *cs*, where *cs* is a
        :class:`~matplotlib.contour.ContourSet` object returned by
        contour.

        ::

          clabel(cs, v, **kwargs)

        only labels contours listed in *v*.

        Optional keyword arguments:

          *fontsize*:
            size in points or relative size e.g., 'smaller', 'x-large'

          *colors*:
            - if *None*, the color of each label matches the color of
              the corresponding contour

            - if one string color, e.g., *colors* = 'r' or *colors* =
              'red', all labels will be plotted in this color

            - if a tuple of matplotlib color args (string, float, rgb, etc),
              different labels will be plotted in different colors in the order
              specified

          *inline*:
            controls whether the underlying contour is removed or
            not. Default is *True*.

          *inline_spacing*:
            space in pixels to leave on each side of label when
            placing inline.  Defaults to 5.  This spacing will be
            exact for labels at locations where the contour is
            straight, less so for labels on curved contours.

          *fmt*:
            a format string for the label. Default is '%1.3f'
            Alternatively, this can be a dictionary matching contour
            levels with arbitrary strings to use for each contour level
            (i.e., fmt[level]=string), or it can be any callable, such
            as a :class:`~matplotlib.ticker.Formatter` instance, that
            returns a string when called with a numeric contour level.

          *manual*:
            if *True*, contour labels will be placed manually using
            mouse clicks.  Click the first button near a contour to
            add a label, click the second button (or potentially both
            mouse buttons at once) to finish adding labels.  The third
            button can be used to remove the last label added, but
            only if labels are not inline.  Alternatively, the keyboard
            can be used to select label locations (enter to end label
            placement, delete or backspace act like the third mouse button,
            and any other key will select a label location).

            *manual* can be an iterable object of x,y tuples. Contour labels
            will be created as if mouse is clicked at each x,y positions.

          *rightside_up*:
            if *True* (default), label rotations will always be plus
            or minus 90 degrees from level.

          *use_clabeltext*:
            if *True* (default is False), ClabelText class (instead of
            matplotlib.Text) is used to create labels. ClabelText
            recalculates rotation angles of texts during the drawing time,
            therefore this can be used if aspect of the axes changes.

        .. plot:: mpl_examples/pylab_examples/contour_demo.py
        """

        """
        NOTES on how this all works:

        clabel basically takes the input arguments and uses them to
        add a list of "label specific" attributes to the ContourSet
        object.  These attributes are all of the form label* and names
        should be fairly self explanatory.

        Once these attributes are set, clabel passes control to the
        labels method (case of automatic label placement) or
        BlockingContourLabeler (case of manual label placement).
        """
        # Pull label options out of kwargs; most are stashed on self as
        # label* attributes for the placement routines to consult.
        fontsize = kwargs.get('fontsize', None)
        inline = kwargs.get('inline', 1)
        inline_spacing = kwargs.get('inline_spacing', 5)
        self.labelFmt = kwargs.get('fmt', '%1.3f')
        _colors = kwargs.get('colors', None)
        self._use_clabeltext = kwargs.get('use_clabeltext', False)
        # Detect if manual selection is desired and remove from argument list
        self.labelManual = kwargs.get('manual', False)
        self.rightside_up = kwargs.get('rightside_up', True)
        # No positional args: label every level; one arg: label only the
        # levels listed in it (must all exist in self.levels).
        if len(args) == 0:
            levels = self.levels
            indices = list(xrange(len(self.cvalues)))
        elif len(args) == 1:
            levlabs = list(args[0])
            indices, levels = [], []
            for i, lev in enumerate(self.levels):
                if lev in levlabs:
                    indices.append(i)
                    levels.append(lev)
            if len(levels) < len(levlabs):
                msg = "Specified levels " + str(levlabs)
                msg += "\n don't match available levels "
                msg += str(self.levels)
                raise ValueError(msg)
        else:
            raise TypeError("Illegal arguments to clabel, see help(clabel)")
        self.labelLevelList = levels
        self.labelIndiceList = indices
        self.labelFontProps = font_manager.FontProperties()
        if fontsize is None:
            font_size = int(self.labelFontProps.get_size_in_points())
        else:
            if type(fontsize) not in [int, float, str]:
                raise TypeError("Font size must be an integer number.")
                # Can't it be floating point, as indicated in line above?
            else:
                if type(fontsize) == str:
                    font_size = int(self.labelFontProps.get_size_in_points())
                else:
                    self.labelFontProps.set_size(fontsize)
                    font_size = fontsize
        self.labelFontSizeList = [font_size] * len(levels)
        # Label colors: by default reuse the contour mappable; otherwise
        # build a dedicated colormap cycling through the given colors.
        if _colors is None:
            self.labelMappable = self
            self.labelCValueList = np.take(self.cvalues, self.labelIndiceList)
        else:
            cmap = colors.ListedColormap(_colors, N=len(self.labelLevelList))
            self.labelCValueList = list(xrange(len(self.labelLevelList)))
            self.labelMappable = cm.ScalarMappable(cmap=cmap,
                                                   norm=colors.NoNorm())
        #self.labelTexts = []  # Initialized in ContourSet.__init__
        #self.labelCValues = []  # same
        self.labelXYs = []
        # Dispatch on the `manual` option: explicit positions, interactive
        # placement, or fully automatic placement.
        if cbook.iterable(self.labelManual):
            for x, y in self.labelManual:
                self.add_label_near(x, y, inline,
                                    inline_spacing)
        elif self.labelManual:
            print('Select label locations manually using first mouse button.')
            print('End manual selection with second mouse button.')
            if not inline:
                print('Remove last label by clicking third mouse button.')
            blocking_contour_labeler = BlockingContourLabeler(self)
            blocking_contour_labeler(inline, inline_spacing)
        else:
            self.labels(inline, inline_spacing)
        # Hold on to some old attribute names.  These are deprecated and will
        # be removed in the near future (sometime after 2008-08-01), but
        # keeping for now for backwards compatibility
        self.cl = self.labelTexts
        self.cl_xy = self.labelXYs
        self.cl_cvalues = self.labelCValues
        self.labelTextsList = cbook.silent_list('text.Text', self.labelTexts)
        return self.labelTextsList
def print_label(self, linecontour, labelwidth):
"Return *False* if contours are too short for a label."
lcsize = len(linecontour)
if lcsize > 10 * labelwidth:
return True
xmax = np.amax(linecontour[:, 0])
xmin = np.amin(linecontour[:, 0])
ymax = np.amax(linecontour[:, 1])
ymin = np.amin(linecontour[:, 1])
lw = labelwidth
if (xmax - xmin) > 1.2 * lw or (ymax - ymin) > 1.2 * lw:
return True
else:
return False
def too_close(self, x, y, lw):
"Return *True* if a label is already near this location."
for loc in self.labelXYs:
d = np.sqrt((x - loc[0]) ** 2 + (y - loc[1]) ** 2)
if d < 1.2 * lw:
return True
return False
def get_label_coords(self, distances, XX, YY, ysize, lw):
"""
Return x, y, and the index of a label location.
Labels are plotted at a location with the smallest
deviation of the contour from a straight line
unless there is another label nearby, in which case
the next best place on the contour is picked up.
If all such candidates are rejected, the beginning
of the contour is chosen.
"""
hysize = int(ysize / 2)
adist = np.argsort(distances)
for ind in adist:
x, y = XX[ind][hysize], YY[ind][hysize]
if self.too_close(x, y, lw):
continue
return x, y, ind
ind = adist[0]
x, y = XX[ind][hysize], YY[ind][hysize]
return x, y, ind
    def get_label_width(self, lev, fmt, fsize):
        """
        Return the width of the label in points.

        *lev* may be a level value (formatted through *fmt*) or an
        already-formatted string.  TeX and mathtext labels are measured
        with the respective renderers; plain text falls back to a crude
        per-character estimate.
        """
        if not cbook.is_string_like(lev):
            lev = self.get_text(lev, fmt)
        lev, ismath = text.Text.is_math_text(lev)
        if ismath == 'TeX':
            # Lazily create and cache the TeX manager on first use.
            if not hasattr(self, '_TeX_manager'):
                self._TeX_manager = texmanager.TexManager()
            lw, _, _ = self._TeX_manager.get_text_width_height_descent(lev,
                                                                       fsize)
        elif ismath:
            # Lazily create and cache the mathtext parser on first use.
            if not hasattr(self, '_mathtext_parser'):
                self._mathtext_parser = mathtext.MathTextParser('bitmap')
            img, _ = self._mathtext_parser.parse(lev, dpi=72,
                                                 prop=self.labelFontProps)
            lw = img.get_width()  # at dpi=72, the units are PostScript points
        else:
            # width is much less than "font size"
            lw = (len(lev)) * fsize * 0.6
        return lw
    def get_real_label_width(self, lev, fmt, fsize):
        """
        This computes actual onscreen label width.

        This uses some black magic to determine onscreen extent of non-drawn
        label. This magic may not be very robust.

        This method is not being used, and may be modified or removed.
        """
        # Find middle of axes
        xx = np.mean(np.asarray(self.ax.axis()).reshape(2, 2), axis=1)
        # Temporarily create text object, styled like a real label, but
        # never added to the axes.
        t = text.Text(xx[0], xx[1])
        self.set_label_props(t, self.get_text(lev, fmt), 'k')
        # Some black magic to get onscreen extent
        # NOTE: This will only work for already drawn figures, as the canvas
        # does not have a renderer otherwise. This is the reason this function
        # can't be integrated into the rest of the code.
        bbox = t.get_window_extent(renderer=self.ax.figure.canvas.renderer)
        # difference in pixel extent of image (left vs right corners)
        lw = np.diff(bbox.corners()[0::2, 0])[0]
        return lw
    def set_label_props(self, label, text, color):
        "set the label properties - color, fontsize, text"
        label.set_text(text)
        label.set_color(color)
        # All labels share the font properties chosen in clabel().
        label.set_fontproperties(self.labelFontProps)
        # Clip to the axes so labels outside the view are not drawn.
        label.set_clip_box(self.ax.bbox)
def get_text(self, lev, fmt):
"get the text of the label"
if cbook.is_string_like(lev):
return lev
else:
if isinstance(fmt, dict):
return fmt[lev]
elif six.callable(fmt):
return fmt(lev)
else:
return fmt % lev
    def locate_label(self, linecontour, labelwidth):
        """
        Find a good place to plot a label (relatively flat
        part of the contour).

        Returns (x, y, index) where index is the vertex index of the
        chosen location within *linecontour*.
        """
        # Chop the contour into roughly label-width chunks; np.resize
        # wraps around so every chunk is full length.
        nsize = len(linecontour)
        if labelwidth > 1:
            xsize = int(np.ceil(nsize / labelwidth))
        else:
            xsize = 1
        if xsize == 1:
            ysize = nsize
        else:
            ysize = int(labelwidth)
        XX = np.resize(linecontour[:, 0], (xsize, ysize))
        YY = np.resize(linecontour[:, 1], (xsize, ysize))
        #I might have fouled up the following:
        # Per-chunk deviation from the chord (first->last point): the sum
        # of point-to-chord distances measures how far from straight the
        # chunk is.
        yfirst = YY[:, 0].reshape(xsize, 1)
        ylast = YY[:, -1].reshape(xsize, 1)
        xfirst = XX[:, 0].reshape(xsize, 1)
        xlast = XX[:, -1].reshape(xsize, 1)
        s = (yfirst - YY) * (xlast - xfirst) - (xfirst - XX) * (ylast - yfirst)
        L = np.sqrt((xlast - xfirst) ** 2 + (ylast - yfirst) ** 2).ravel()
        dist = np.add.reduce(([(abs(s)[i] / L[i]) for i in range(xsize)]), -1)
        x, y, ind = self.get_label_coords(dist, XX, YY, ysize, labelwidth)
        #print 'ind, x, y', ind, x, y
        # There must be a more efficient way...
        # Map the chunk-local point back to its index in the full contour.
        lc = [tuple(l) for l in linecontour]
        dind = lc.index((x, y))
        #print 'dind', dind
        #dind = list(linecontour).index((x,y))
        return x, y, dind
    def calc_label_rot_and_inline(self, slc, ind, lw, lc=None, spacing=5):
        """
        This function calculates the appropriate label rotation given
        the linecontour coordinates in screen units, the index of the
        label location and the label width.

        It will also break contour and calculate inlining if *lc* is
        not empty (lc defaults to the empty list if None).  *spacing*
        is the space around the label in pixels to leave empty.

        Do both of these tasks at once to avoid calling mlab.path_length
        multiple times, which is relatively costly.

        The method used here involves calculating the path length
        along the contour in pixel coordinates and then looking
        approximately label width / 2 away from central point to
        determine rotation and then to break contour if desired.

        Returns (rotation_in_degrees, list_of_replacement_contours).
        """
        if lc is None:
            lc = []
        # Half the label width
        hlw = lw / 2.0
        # Check if closed and, if so, rotate contour so label is at edge
        closed = mlab.is_closed_polygon(slc)
        if closed:
            slc = np.r_[slc[ind:-1], slc[:ind + 1]]
            if len(lc):  # Rotate lc also if not empty
                lc = np.r_[lc[ind:-1], lc[:ind + 1]]
            ind = 0
        # Path length in pixel space, re-zeroed at the label location
        pl = mlab.path_length(slc)
        pl = pl - pl[ind]
        # Use linear interpolation to get points around label
        xi = np.array([-hlw, hlw])
        if closed:  # Look at end also for closed contours
            dp = np.array([pl[-1], 0])
        else:
            dp = np.zeros_like(xi)
        ll = mlab.less_simple_linear_interpolation(pl, slc, dp + xi,
                                                   extrap=True)
        # get vector in pixel space coordinates from one point to other
        dd = np.diff(ll, axis=0).ravel()
        # Get angle of vector - must be calculated in pixel space for
        # text rotation to work correctly
        if np.all(dd == 0):  # Must deal with case of zero length label
            rotation = 0.0
        else:
            rotation = np.arctan2(dd[1], dd[0]) * 180.0 / np.pi
        if self.rightside_up:
            # Fix angle so text is never upside-down
            if rotation > 90:
                rotation = rotation - 180.0
            if rotation < -90:
                rotation = 180.0 + rotation
        # Break contour if desired
        nlc = []
        if len(lc):
            # Expand range by spacing
            xi = dp + xi + np.array([-spacing, spacing])
            # Get indices near points of interest
            I = mlab.less_simple_linear_interpolation(
                pl, np.arange(len(pl)), xi, extrap=False)
            # If those indices aren't beyond contour edge, find x,y
            if (not np.isnan(I[0])) and int(I[0]) != I[0]:
                xy1 = mlab.less_simple_linear_interpolation(
                    pl, lc, [xi[0]])
            if (not np.isnan(I[1])) and int(I[1]) != I[1]:
                xy2 = mlab.less_simple_linear_interpolation(
                    pl, lc, [xi[1]])
            # Round to integer values but keep as float
            # To allow check against nan below
            I = [np.floor(I[0]), np.ceil(I[1])]
            # Actually break contours
            if closed:
                # This will remove contour if shorter than label
                if np.all(~np.isnan(I)):
                    nlc.append(np.r_[xy2, lc[int(I[1]):int(I[0]) + 1], xy1])
            else:
                # These will remove pieces of contour if they have length zero
                if not np.isnan(I[0]):
                    nlc.append(np.r_[lc[:int(I[0]) + 1], xy1])
                if not np.isnan(I[1]):
                    nlc.append(np.r_[xy2, lc[int(I[1]):]])
            # The current implementation removes contours completely
            # covered by labels.  Uncomment line below to keep
            # original contour if this is the preferred behavior.
            #if not len(nlc): nlc = [ lc ]
        return rotation, nlc
def _get_label_text(self, x, y, rotation):
dx, dy = self.ax.transData.inverted().transform_point((x, y))
t = text.Text(dx, dy, rotation=rotation,
horizontalalignment='center',
verticalalignment='center')
return t
    def _get_label_clabeltext(self, x, y, rotation):
        """Build a centered ClabelText artist at pixel position (x, y)."""
        # x, y, rotation is given in pixel coordinate. Convert them to
        # the data coordinate and create a label using ClabelText
        # class. This way, the rotation of the clabel is along the
        # contour line always.
        transDataInv = self.ax.transData.inverted()
        dx, dy = transDataInv.transform_point((x, y))
        drotation = transDataInv.transform_angles(np.array([rotation]),
                                                  np.array([[x, y]]))
        t = ClabelText(dx, dy, rotation=drotation[0],
                       horizontalalignment='center',
                       verticalalignment='center')
        return t
    def _add_label(self, t, x, y, lev, cvalue):
        """Style artist *t* as a label for level *lev* and register it."""
        color = self.labelMappable.to_rgba(cvalue, alpha=self.alpha)
        _text = self.get_text(lev, self.labelFmt)
        self.set_label_props(t, _text, color)
        # Bookkeeping lists kept in lockstep (text, value, position).
        self.labelTexts.append(t)
        self.labelCValues.append(cvalue)
        self.labelXYs.append((x, y))
        # Add label to plot here - useful for manual mode label selection
        self.ax.add_artist(t)
def add_label(self, x, y, rotation, lev, cvalue):
"""
Add contour label using :class:`~matplotlib.text.Text` class.
"""
t = self._get_label_text(x, y, rotation)
self._add_label(t, x, y, lev, cvalue)
def add_label_clabeltext(self, x, y, rotation, lev, cvalue):
"""
Add contour label using :class:`ClabelText` class.
"""
# x, y, rotation is given in pixel coordinate. Convert them to
# the data coordinate and create a label using ClabelText
# class. This way, the roation of the clabel is along the
# contour line always.
t = self._get_label_clabeltext(x, y, rotation)
self._add_label(t, x, y, lev, cvalue)
    def add_label_near(self, x, y, inline=True, inline_spacing=5,
                       transform=None):
        """
        Add a label near the point (x, y). If transform is None
        (default), (x, y) is in data coordinates; if transform is
        False, (x, y) is in display coordinates; otherwise, the
        specified transform will be used to translate (x, y) into
        display coordinates.

        *inline*:
          controls whether the underlying contour is removed or
          not. Default is *True*.

        *inline_spacing*:
          space in pixels to leave on each side of label when
          placing inline.  Defaults to 5.  This spacing will be
          exact for labels at locations where the contour is
          straight, less so for labels on curved contours.
        """
        if transform is None:
            transform = self.ax.transData
        if transform:
            x, y = transform.transform_point((x, y))
        # find the nearest contour _in screen units_
        conmin, segmin, imin, xmin, ymin = self.find_nearest_contour(
            x, y, self.labelIndiceList)[:5]
        # The calc_label_rot_and_inline routine requires that (xmin,ymin)
        # be a vertex in the path. So, if it isn't, add a vertex here
        # grab the paths from the collections
        paths = self.collections[conmin].get_paths()
        # grab the correct segment
        active_path = paths[segmin]
        # grab its vertices
        lc = active_path.vertices
        # sort out where the new vertex should be added, in data units
        xcmin = self.ax.transData.inverted().transform_point([xmin, ymin])
        # if there isn't a vertex close enough
        if not np.allclose(xcmin, lc[imin]):
            # insert new data into the vertex list
            lc = np.r_[lc[:imin], np.array(xcmin)[None, :], lc[imin:]]
            # replace the path with the new one
            paths[segmin] = mpath.Path(lc)
        # Get index of nearest level in subset of levels used for labeling
        lmin = self.labelIndiceList.index(conmin)
        # Coordinates of contour (re-fetched; the path may have changed)
        paths = self.collections[conmin].get_paths()
        lc = paths[segmin].vertices
        # In pixel/screen space
        slc = self.ax.transData.transform(lc)
        # Get label width for rotating labels and breaking contours
        lw = self.get_label_width(self.labelLevelList[lmin],
                                  self.labelFmt, self.labelFontSizeList[lmin])
        # Figure out label rotation.  Inline labels also need the data-space
        # contour so it can be broken around the label.
        if inline:
            lcarg = lc
        else:
            lcarg = None
        rotation, nlc = self.calc_label_rot_and_inline(
            slc, imin, lw, lcarg,
            inline_spacing)
        self.add_label(xmin, ymin, rotation, self.labelLevelList[lmin],
                       self.labelCValueList[lmin])
        if inline:
            # Remove old, not looping over paths so we can do this up front
            paths.pop(segmin)
            # Add paths if not empty or single point
            for n in nlc:
                if len(n) > 1:
                    paths.append(mpath.Path(n))
def pop_label(self, index=-1):
"""Defaults to removing last label, but any index can be supplied"""
self.labelCValues.pop(index)
t = self.labelTexts.pop(index)
t.remove()
    def labels(self, inline, inline_spacing):
        """Place labels on all contour levels selected for labeling.

        For every labelled level, each path of the level's collection is
        measured in screen space; paths long enough to carry a label get
        one.  If *inline* is true, the contour paths are rewritten with
        gaps broken around each label, leaving *inline_spacing* pixels of
        space on either side.
        """
        # Choose the label-adding routine once, up front.
        if self._use_clabeltext:
            add_label = self.add_label_clabeltext
        else:
            add_label = self.add_label

        for icon, lev, fsize, cvalue in zip(
                self.labelIndiceList, self.labelLevelList,
                self.labelFontSizeList, self.labelCValueList):
            con = self.collections[icon]
            trans = con.get_transform()
            lw = self.get_label_width(lev, self.labelFmt, fsize)
            lw *= self.ax.figure.dpi / 72.0  # scale to screen coordinates
            additions = []
            paths = con.get_paths()
            for segNum, linepath in enumerate(paths):
                lc = linepath.vertices  # Line contour
                slc0 = trans.transform(lc)  # Line contour in screen coords

                # For closed polygons, add extra point to avoid division by
                # zero in print_label and locate_label.  Other than these
                # functions, this is not necessary and should probably be
                # eventually removed.
                if mlab.is_closed_polygon(lc):
                    slc = np.r_[slc0, slc0[1:2, :]]
                else:
                    slc = slc0

                # Check if long enough for a label
                if self.print_label(slc, lw):
                    x, y, ind = self.locate_label(slc, lw)

                    # Only pass data-space vertices when inlining; None
                    # tells calc_label_rot_and_inline to skip breaking.
                    if inline:
                        lcarg = lc
                    else:
                        lcarg = None
                    rotation, new = self.calc_label_rot_and_inline(
                        slc0, ind, lw, lcarg,
                        inline_spacing)

                    # Actually add the label
                    add_label(x, y, rotation, lev, cvalue)

                    # If inline, add new contours
                    if inline:
                        for n in new:
                            # Add path if not empty or single point
                            if len(n) > 1:
                                additions.append(mpath.Path(n))
                else:  # If not adding label, keep old path
                    additions.append(linepath)

            # After looping over all segments on a contour, remove old
            # paths and add new ones if inlining.  The list is mutated in
            # place so the collection keeps referencing the same object.
            if inline:
                del paths[:]
                paths.extend(additions)
def _find_closest_point_on_leg(p1, p2, p0):
"""find closest point to p0 on line segment connecting p1 and p2"""
# handle degenerate case
if np.all(p2 == p1):
d = np.sum((p0 - p1)**2)
return d, p1
d21 = p2 - p1
d01 = p0 - p1
# project on to line segment to find closest point
proj = np.dot(d01, d21) / np.dot(d21, d21)
if proj < 0:
proj = 0
if proj > 1:
proj = 1
pc = p1 + proj * d21
# find squared distance
d = np.sum((pc-p0)**2)
return d, pc
def _find_closest_point_on_path(lc, point):
    """Return ``(dmin, xcmin, legmin)`` for the point on the polyline *lc*
    closest to *point*: the squared distance, the closest coordinates, and
    the (start, end) vertex indices of the leg it lies on.

    lc: coordinates of vertices
    point: coordinates of test point
    """
    # Squared distance from the test point to every vertex; the nearest
    # vertex anchors the leg search.
    dist2 = np.sum((lc - point[None, :]) ** 2, 1)
    ivert = np.argmin(dist2)

    closed = mlab.is_closed_polygon(lc)

    # Candidate legs: the edge arriving at the nearest vertex and the edge
    # leaving it (wrapping around for closed polygons).
    candidates = []
    if closed or ivert > 0:
        candidates.append(((ivert - 1) % len(lc), ivert))
    if closed or ivert < len(lc) - 1:
        candidates.append((ivert, (ivert + 1) % len(lc)))

    best = (np.inf, None, (None, None))
    for leg in candidates:
        d, xc = _find_closest_point_on_leg(lc[leg[0]], lc[leg[1]], point)
        if d < best[0]:
            best = (d, xc, leg)

    return best
class ContourSet(cm.ScalarMappable, ContourLabeler):
    """
    Store a set of contour lines or filled regions.

    User-callable method: clabel

    Useful attributes:
      ax:
        The axes object in which the contours are drawn
      collections:
        a silent_list of LineCollections or PolyCollections
      levels:
        contour levels
      layers:
        same as levels for line contours; half-way between
        levels for filled contours.  See :meth:`_process_colors`.
    """
    def __init__(self, ax, *args, **kwargs):
        """
        Draw contour lines or filled regions, depending on
        whether keyword arg 'filled' is *False* (default) or *True*.

        The first three arguments must be:

          *ax*: axes object.

          *levels*: [level0, level1, ..., leveln]
            A list of floating point numbers indicating the contour
            levels.

          *allsegs*: [level0segs, level1segs, ...]
            List of all the polygon segments for all the *levels*.
            For contour lines ``len(allsegs) == len(levels)``, and for
            filled contour regions ``len(allsegs) = len(levels)-1``.

            level0segs = [polygon0, polygon1, ...]

            polygon0 = array_like [[x0,y0], [x1,y1], ...]

          *allkinds*: *None* or [level0kinds, level1kinds, ...]
            Optional list of all the polygon vertex kinds (code types), as
            described and used in Path.  This is used to allow multiply-
            connected paths such as holes within filled polygons.
            If not *None*, len(allkinds) == len(allsegs).

            level0kinds = [polygon0kinds, ...]

            polygon0kinds = [vertexcode0, vertexcode1, ...]

            If *allkinds* is not *None*, usually all polygons for a
            particular contour level are grouped together so that
            level0segs = [polygon0] and level0kinds = [polygon0kinds].

        Keyword arguments are as described in
        :class:`~matplotlib.contour.QuadContourSet` object.

        **Examples:**

        .. plot:: mpl_examples/misc/contour_manual.py
        """
        self.ax = ax
        self.levels = kwargs.get('levels', None)
        self.filled = kwargs.get('filled', False)
        self.linewidths = kwargs.get('linewidths', None)
        self.linestyles = kwargs.get('linestyles', None)
        self.hatches = kwargs.get('hatches', [None])
        self.alpha = kwargs.get('alpha', None)
        self.origin = kwargs.get('origin', None)
        self.extent = kwargs.get('extent', None)
        cmap = kwargs.get('cmap', None)
        self.colors = kwargs.get('colors', None)
        norm = kwargs.get('norm', None)
        vmin = kwargs.get('vmin', None)
        vmax = kwargs.get('vmax', None)
        self.extend = kwargs.get('extend', 'neither')
        self.antialiased = kwargs.get('antialiased', None)
        if self.antialiased is None and self.filled:
            # eliminate artifacts; we are not stroking the boundaries.
            self.antialiased = False
            # The default for line contours will be taken from
            # the LineCollection default, which uses the
            # rcParams['lines.antialiased']
        self.nchunk = kwargs.get('nchunk', 0)
        self.locator = kwargs.get('locator', None)
        if (isinstance(norm, colors.LogNorm)
                or isinstance(self.locator, ticker.LogLocator)):
            self.logscale = True
            if norm is None:
                norm = colors.LogNorm()
            # BUG FIX: was ``self.extend is not 'neither'``; identity
            # comparison against a string literal is CPython-interning
            # dependent.  Use equality.
            if self.extend != 'neither':
                raise ValueError('extend kwarg does not work yet with log '
                                 ' scale')
        else:
            self.logscale = False

        # BUG FIX: these validations previously used ``assert``, which is
        # stripped when Python runs with -O; raise explicitly instead.
        if self.origin is not None and \
                self.origin not in ['lower', 'upper', 'image']:
            raise ValueError("origin must be 'lower', 'upper' or 'image'")
        if self.extent is not None and len(self.extent) != 4:
            raise ValueError("extent must be a sequence of four numbers")
        if self.colors is not None and cmap is not None:
            raise ValueError('Either colors or cmap must be None')
        if self.origin == 'image':
            self.origin = mpl.rcParams['image.origin']

        self._transform = kwargs.get('transform', None)

        self._process_args(*args, **kwargs)
        self._process_levels()

        if self.colors is not None:
            ncolors = len(self.levels)
            if self.filled:
                ncolors -= 1
            i0 = 0

            # Handle the case where colors are given for the extended
            # parts of the contour.
            extend_min = self.extend in ['min', 'both']
            extend_max = self.extend in ['max', 'both']
            use_set_under_over = False
            # if we are extending the lower end, and we've been given enough
            # colors then skip the first color in the resulting cmap. For the
            # extend_max case we don't need to worry about passing more colors
            # than ncolors as ListedColormap will clip.
            total_levels = ncolors + int(extend_min) + int(extend_max)
            if len(self.colors) == total_levels and (extend_min or extend_max):
                use_set_under_over = True
                if extend_min:
                    i0 = 1

            cmap = colors.ListedColormap(self.colors[i0:None], N=ncolors)

            if use_set_under_over:
                if extend_min:
                    cmap.set_under(self.colors[0])
                if extend_max:
                    cmap.set_over(self.colors[-1])

        if self.filled:
            self.collections = cbook.silent_list('mcoll.PathCollection')
        else:
            self.collections = cbook.silent_list('mcoll.LineCollection')
        # label lists must be initialized here
        self.labelTexts = []
        self.labelCValues = []

        kw = {'cmap': cmap}
        if norm is not None:
            kw['norm'] = norm
        # sets self.cmap, norm if needed;
        cm.ScalarMappable.__init__(self, **kw)
        if vmin is not None:
            self.norm.vmin = vmin
        if vmax is not None:
            self.norm.vmax = vmax
        self._process_colors()

        self.allsegs, self.allkinds = self._get_allsegs_and_allkinds()

        if self.filled:
            if self.linewidths is not None:
                warnings.warn('linewidths is ignored by contourf')

            # Lower and upper contour levels.
            lowers, uppers = self._get_lowers_and_uppers()

            # Ensure allkinds can be zipped below.
            if self.allkinds is None:
                self.allkinds = [None] * len(self.allsegs)

            for level, level_upper, segs, kinds in \
                    zip(lowers, uppers, self.allsegs, self.allkinds):
                paths = self._make_paths(segs, kinds)
                # Default zorder taken from Collection
                zorder = kwargs.get('zorder', 1)
                col = mcoll.PathCollection(
                    paths,
                    antialiaseds=(self.antialiased,),
                    edgecolors='none',
                    alpha=self.alpha,
                    transform=self.get_transform(),
                    zorder=zorder)
                self.ax.add_collection(col, autolim=False)
                self.collections.append(col)
        else:
            tlinewidths = self._process_linewidths()
            self.tlinewidths = tlinewidths
            tlinestyles = self._process_linestyles()
            aa = self.antialiased
            if aa is not None:
                aa = (self.antialiased,)
            for level, width, lstyle, segs in \
                    zip(self.levels, tlinewidths, tlinestyles, self.allsegs):
                # Default zorder taken from LineCollection
                zorder = kwargs.get('zorder', 2)
                col = mcoll.LineCollection(
                    segs,
                    antialiaseds=aa,
                    linewidths=width,
                    linestyle=[lstyle],
                    alpha=self.alpha,
                    transform=self.get_transform(),
                    zorder=zorder)
                col.set_label('_nolegend_')
                self.ax.add_collection(col, autolim=False)
                self.collections.append(col)
        self.changed()  # set the colors

    def get_transform(self):
        """
        Return the :class:`~matplotlib.transforms.Transform`
        instance used by this ContourSet.
        """
        if self._transform is None:
            self._transform = self.ax.transData
        elif (not isinstance(self._transform, mtrans.Transform)
              and hasattr(self._transform, '_as_mpl_transform')):
            self._transform = self._transform._as_mpl_transform(self.ax)
        return self._transform

    def __getstate__(self):
        state = self.__dict__.copy()
        # the C object Cntr cannot currently be pickled. This isn't a big
        # issue as it is not actually used once the contour has been
        # calculated.
        state['Cntr'] = None
        return state

    def legend_elements(self, variable_name='x', str_format=str):
        """
        Return a list of artist and labels suitable for passing through
        to :func:`plt.legend` which represent this ContourSet.

        Args:
            *variable_name*: the string used inside the inequality used
              on the labels

            *str_format*: function used to format the numbers in the labels
        """
        artists = []
        labels = []

        if self.filled:
            lowers, uppers = self._get_lowers_and_uppers()
            n_levels = len(self.collections)

            for i, (collection, lower, upper) in enumerate(
                    zip(self.collections, lowers, uppers)):
                patch = mpatches.Rectangle(
                    (0, 0), 1, 1,
                    facecolor=collection.get_facecolor()[0],
                    hatch=collection.get_hatch(),
                    alpha=collection.get_alpha())
                artists.append(patch)

                lower = str_format(lower)
                upper = str_format(upper)

                if i == 0 and self.extend in ('min', 'both'):
                    labels.append(r'$%s \leq %s$' % (variable_name,
                                                     lower))
                elif i == n_levels - 1 and self.extend in ('max', 'both'):
                    labels.append(r'$%s > %s$' % (variable_name,
                                                  upper))
                else:
                    labels.append(r'$%s < %s \leq %s$' % (lower,
                                                          variable_name,
                                                          upper))
        else:
            for collection, level in zip(self.collections, self.levels):
                patch = mcoll.LineCollection(None)
                patch.update_from(collection)
                artists.append(patch)
                # format the level for insertion into the labels
                level = str_format(level)
                labels.append(r'$%s = %s$' % (variable_name, level))

        return artists, labels

    def _process_args(self, *args, **kwargs):
        """
        Process *args* and *kwargs*; override in derived classes.

        Must set self.levels, self.zmin and self.zmax, and update axes
        limits.
        """
        self.levels = args[0]
        self.allsegs = args[1]
        # BUG FIX: was ``len(args) > 2 and args[2] or None``, which also
        # collapsed an explicitly supplied *empty* allkinds to None.
        self.allkinds = args[2] if len(args) > 2 else None
        self.zmax = np.amax(self.levels)
        self.zmin = np.amin(self.levels)
        self._auto = False

        # Check lengths of levels and allsegs.
        if self.filled:
            if len(self.allsegs) != len(self.levels) - 1:
                raise ValueError('must be one less number of segments as '
                                 'levels')
        else:
            if len(self.allsegs) != len(self.levels):
                raise ValueError('must be same number of segments as levels')

        # Check length of allkinds.
        if (self.allkinds is not None and
                len(self.allkinds) != len(self.allsegs)):
            raise ValueError('allkinds has different length to allsegs')

        # Determine x,y bounds and update axes data limits.
        # (Locals renamed from min/max to avoid shadowing the builtins.)
        havelimits = False
        for segs in self.allsegs:
            for seg in segs:
                seg = np.asarray(seg)
                if havelimits:
                    points_min = np.minimum(points_min, seg.min(axis=0))
                    points_max = np.maximum(points_max, seg.max(axis=0))
                else:
                    points_min = seg.min(axis=0)
                    points_max = seg.max(axis=0)
                    havelimits = True

        if havelimits:
            self.ax.update_datalim([points_min, points_max])
            self.ax.autoscale_view(tight=True)

    def _get_allsegs_and_allkinds(self):
        """
        Override in derived classes to create and return allsegs and
        allkinds.  allkinds can be None.
        """
        return self.allsegs, self.allkinds

    def _get_lowers_and_uppers(self):
        """
        Return (lowers,uppers) for filled contours.
        """
        lowers = self._levels[:-1]
        if self.zmin == lowers[0]:
            # Include minimum values in lowest interval
            lowers = lowers.copy()  # so we don't change self._levels
            if self.logscale:
                lowers[0] = 0.99 * self.zmin
            else:
                lowers[0] -= 1
        uppers = self._levels[1:]
        return (lowers, uppers)

    def _make_paths(self, segs, kinds):
        """Build Path objects from vertex arrays and optional code arrays."""
        if kinds is not None:
            return [mpath.Path(seg, codes=kind)
                    for seg, kind in zip(segs, kinds)]
        else:
            return [mpath.Path(seg) for seg in segs]

    def changed(self):
        """Propagate colormap/norm changes to the collections and labels."""
        tcolors = [(tuple(rgba),)
                   for rgba in self.to_rgba(self.cvalues, alpha=self.alpha)]
        self.tcolors = tcolors
        hatches = self.hatches * len(tcolors)
        for color, hatch, collection in zip(tcolors, hatches,
                                            self.collections):
            if self.filled:
                collection.set_facecolor(color)
                # update the collection's hatch (may be None)
                collection.set_hatch(hatch)
            else:
                collection.set_color(color)
        for label, cv in zip(self.labelTexts, self.labelCValues):
            label.set_alpha(self.alpha)
            label.set_color(self.labelMappable.to_rgba(cv))
        # add label colors
        cm.ScalarMappable.changed(self)

    def _autolev(self, z, N):
        """
        Select contour levels to span the data.

        We need two more levels for filled contours than for
        line contours, because for the latter we need to specify
        the lower and upper boundary of each range. For example,
        a single contour boundary, say at z = 0, requires only
        one contour line, but two filled regions, and therefore
        three levels to provide boundaries for both regions.
        """
        if self.locator is None:
            if self.logscale:
                self.locator = ticker.LogLocator()
            else:
                self.locator = ticker.MaxNLocator(N + 1)
        zmax = self.zmax
        zmin = self.zmin
        lev = self.locator.tick_values(zmin, zmax)
        self._auto = True
        if self.filled:
            return lev
        # For line contours, drop levels outside the data range.
        return lev[(lev > zmin) & (lev < zmax)]

    def _contour_level_args(self, z, args):
        """
        Determine the contour levels and store in self.levels.
        """
        if self.filled:
            fn = 'contourf'
        else:
            fn = 'contour'
        self._auto = False
        if self.levels is None:
            if len(args) == 0:
                lev = self._autolev(z, 7)
            else:
                level_arg = args[0]
                try:
                    # NOTE: deliberately an exact type check, preserving
                    # the historical behavior that only a plain Python int
                    # requests N auto-chosen levels.
                    if type(level_arg) == int:
                        lev = self._autolev(z, level_arg)
                    else:
                        lev = np.asarray(level_arg).astype(np.float64)
                # BUG FIX: was a bare ``except:``, which also swallowed
                # KeyboardInterrupt/SystemExit; catch only the conversion
                # failures raised by asarray/astype.
                except (TypeError, ValueError):
                    raise TypeError(
                        "Last %s arg must give levels; see help(%s)" %
                        (fn, fn))
            self.levels = lev
        if self.filled and len(self.levels) < 2:
            raise ValueError("Filled contours require at least 2 levels.")

    def _process_levels(self):
        """
        Assign values to :attr:`layers` based on :attr:`levels`,
        adding extended layers as needed if contours are filled.

        For line contours, layers simply coincide with levels;
        a line is a thin layer.  No extended levels are needed
        with line contours.
        """
        # The following attributes are no longer needed, and
        # should be deprecated and removed to reduce confusion.
        self.vmin = np.amin(self.levels)
        self.vmax = np.amax(self.levels)

        # Make a private _levels to include extended regions; we
        # want to leave the original levels attribute unchanged.
        # (Colorbar needs this even for line contours.)
        self._levels = list(self.levels)

        if self.extend in ('both', 'min'):
            self._levels.insert(0, min(self.levels[0], self.zmin) - 1)
        if self.extend in ('both', 'max'):
            self._levels.append(max(self.levels[-1], self.zmax) + 1)
        self._levels = np.asarray(self._levels)

        if not self.filled:
            self.layers = self.levels
            return

        # layer values are mid-way between levels
        self.layers = 0.5 * (self._levels[:-1] + self._levels[1:])
        # ...except that extended layers must be outside the
        # normed range:
        if self.extend in ('both', 'min'):
            self.layers[0] = -np.inf
        if self.extend in ('both', 'max'):
            self.layers[-1] = np.inf

    def _process_colors(self):
        """
        Color argument processing for contouring.

        Note that we base the color mapping on the contour levels
        and layers, not on the actual range of the Z values.  This
        means we don't have to worry about bad values in Z, and we
        always have the full dynamic range available for the selected
        levels.

        The color is based on the midpoint of the layer, except for
        extended end layers.  By default, the norm vmin and vmax
        are the extreme values of the non-extended levels.  Hence,
        the layer color extremes are not the extreme values of
        the colormap itself, but approach those values as the number
        of levels increases.  An advantage of this scheme is that
        line contours, when added to filled contours, take on
        colors that are consistent with those of the filled regions;
        for example, a contour line on the boundary between two
        regions will have a color intermediate between those
        of the regions.
        """
        self.monochrome = self.cmap.monochrome
        if self.colors is not None:
            # Generate integers for direct indexing.
            i0, i1 = 0, len(self.levels)
            if self.filled:
                i1 -= 1
                # Out of range indices for over and under:
                if self.extend in ('both', 'min'):
                    i0 = -1
                if self.extend in ('both', 'max'):
                    i1 += 1
            self.cvalues = list(range(i0, i1))
            self.set_norm(colors.NoNorm())
        else:
            self.cvalues = self.layers
        self.set_array(self.levels)
        self.autoscale_None()
        if self.extend in ('both', 'max', 'min'):
            self.norm.clip = False

        # self.tcolors are set by the "changed" method

    def _process_linewidths(self):
        """Return one (width,) tuple per level, cycling/truncating the
        user-supplied linewidths as needed."""
        linewidths = self.linewidths
        Nlev = len(self.levels)
        if linewidths is None:
            tlinewidths = [(mpl.rcParams['lines.linewidth'],)] * Nlev
        else:
            if not cbook.iterable(linewidths):
                linewidths = [linewidths] * Nlev
            else:
                linewidths = list(linewidths)
                if len(linewidths) < Nlev:
                    nreps = int(np.ceil(Nlev / len(linewidths)))
                    linewidths = linewidths * nreps
                if len(linewidths) > Nlev:
                    linewidths = linewidths[:Nlev]
            tlinewidths = [(w,) for w in linewidths]
        return tlinewidths

    def _process_linestyles(self):
        """Return one linestyle string per level; monochrome negative
        contours default to the rc 'contour.negative_linestyle'."""
        linestyles = self.linestyles
        Nlev = len(self.levels)
        if linestyles is None:
            tlinestyles = ['solid'] * Nlev
            if self.monochrome:
                neg_ls = mpl.rcParams['contour.negative_linestyle']
                eps = - (self.zmax - self.zmin) * 1e-15
                for i, lev in enumerate(self.levels):
                    if lev < eps:
                        tlinestyles[i] = neg_ls
        else:
            if cbook.is_string_like(linestyles):
                tlinestyles = [linestyles] * Nlev
            elif cbook.iterable(linestyles):
                tlinestyles = list(linestyles)
                if len(tlinestyles) < Nlev:
                    nreps = int(np.ceil(Nlev / len(linestyles)))
                    tlinestyles = tlinestyles * nreps
                if len(tlinestyles) > Nlev:
                    tlinestyles = tlinestyles[:Nlev]
            else:
                raise ValueError("Unrecognized type for linestyles kwarg")
        return tlinestyles

    def get_alpha(self):
        """returns alpha to be applied to all ContourSet artists"""
        return self.alpha

    def set_alpha(self, alpha):
        """sets alpha for all ContourSet artists"""
        self.alpha = alpha
        self.changed()

    def find_nearest_contour(self, x, y, indices=None, pixel=True):
        """
        Finds contour that is closest to a point.  Defaults to
        measuring distance in pixels (screen space - useful for manual
        contour labeling), but this can be controlled via a keyword
        argument.

        Returns a tuple containing the contour, segment, index of
        segment, x & y of segment point and distance to minimum point.

        Call signature::

          conmin,segmin,imin,xmin,ymin,dmin = find_nearest_contour(
                     self, x, y, indices=None, pixel=True )

        Optional keyword arguments:

          *indices*:
            Indexes of contour levels to consider when looking for
            nearest point.  Defaults to using all levels.

          *pixel*:
            If *True*, measure distance in pixel space, if not, measure
            distance in axes space.  Defaults to *True*.
        """

        # This function uses a method that is probably quite
        # inefficient based on converting each contour segment to
        # pixel coordinates and then comparing the given point to
        # those coordinates for each contour.  This will probably be
        # quite slow for complex contours, but for normal use it works
        # sufficiently well that the time is not noticeable.
        # Nonetheless, improvements could probably be made.

        if indices is None:
            indices = list(xrange(len(self.levels)))

        dmin = np.inf
        conmin = None
        segmin = None
        # BUG FIX: imin was previously left uninitialized, so an empty
        # *indices* (or collections without paths) raised NameError at the
        # return statement instead of returning the None sentinels.
        imin = None
        xmin = None
        ymin = None

        point = np.array([x, y])

        for icon in indices:
            con = self.collections[icon]
            trans = con.get_transform()
            paths = con.get_paths()

            for segNum, linepath in enumerate(paths):
                lc = linepath.vertices
                # transfer all data points to screen coordinates if desired
                if pixel:
                    lc = trans.transform(lc)

                d, xc, leg = _find_closest_point_on_path(lc, point)
                if d < dmin:
                    dmin = d
                    conmin = icon
                    segmin = segNum
                    imin = leg[1]
                    xmin = xc[0]
                    ymin = xc[1]

        return (conmin, segmin, imin, xmin, ymin, dmin)
class QuadContourSet(ContourSet):
    """
    Create and store a set of contour lines or filled regions.

    User-callable method: :meth:`clabel`

    Useful attributes:
      ax:
        The axes object in which the contours are drawn
      collections:
        A silent_list of LineCollections or PolyCollections
      levels:
        Contour levels
      layers:
        Same as levels for line contours; half-way between
        levels for filled contours.  See :meth:`_process_colors` method.
    """
    def __init__(self, ax, *args, **kwargs):
        """
        Calculate and draw contour lines or filled regions, depending
        on whether keyword arg 'filled' is False (default) or True.

        The first argument of the initializer must be an axes
        object.  The remaining arguments and keyword arguments
        are described in QuadContourSet.contour_doc.
        """
        ContourSet.__init__(self, ax, *args, **kwargs)

    def _process_args(self, *args, **kwargs):
        """
        Process args and kwargs.
        """
        if isinstance(args[0], QuadContourSet):
            # Reuse the contour generator of an existing QuadContourSet
            # (and, unless overridden, its levels and data range).
            C = args[0].Cntr
            if self.levels is None:
                self.levels = args[0].levels
            self.zmin = args[0].zmin
            self.zmax = args[0].zmax
        else:
            x, y, z = self._contour_args(args, kwargs)

            # The C contour generator takes the mask separately from the
            # (filled) data array; ma.nomask must be mapped to None.
            _mask = ma.getmask(z)
            if _mask is ma.nomask:
                _mask = None
            C = _cntr.Cntr(x, y, z.filled(), _mask)

            t = self.get_transform()

            # if the transform is not trans data, and some part of it
            # contains transData, transform the xs and ys to data coordinates
            if (t != self.ax.transData and
                    any(t.contains_branch_seperately(self.ax.transData))):
                trans_to_data = t - self.ax.transData
                pts = (np.vstack([x.flat, y.flat]).T)
                transformed_pts = trans_to_data.transform(pts)
                x = transformed_pts[..., 0]
                y = transformed_pts[..., 1]

            # Update the axes data limits from the x/y extrema.
            # NOTE(review): single-argument ma.minimum/ma.maximum perform a
            # full reduction here; this form was deprecated in later NumPy
            # releases — confirm against the NumPy version in use.
            x0 = ma.minimum(x)
            x1 = ma.maximum(x)
            y0 = ma.minimum(y)
            y1 = ma.maximum(y)
            self.ax.update_datalim([(x0, y0), (x1, y1)])
            self.ax.autoscale_view(tight=True)

        # Keep the contour generator for later tracing (not picklable;
        # see ContourSet.__getstate__).
        self.Cntr = C

    def _get_allsegs_and_allkinds(self):
        """
        Create and return allsegs and allkinds by calling underlying C code.
        """
        allsegs = []
        if self.filled:
            lowers, uppers = self._get_lowers_and_uppers()
            allkinds = []
            for level, level_upper in zip(lowers, uppers):
                # Cntr.trace returns a flat list: the first half holds the
                # vertex arrays, the second half the matching kind codes.
                nlist = self.Cntr.trace(level, level_upper,
                                        nchunk=self.nchunk)
                nseg = len(nlist) // 2
                segs = nlist[:nseg]
                kinds = nlist[nseg:]
                allsegs.append(segs)
                allkinds.append(kinds)
        else:
            allkinds = None
            for level in self.levels:
                nlist = self.Cntr.trace(level)
                nseg = len(nlist) // 2
                segs = nlist[:nseg]
                allsegs.append(segs)
        return allsegs, allkinds

    def _contour_args(self, args, kwargs):
        """Unpack the positional (Z) / (X, Y, Z) call forms, mask invalid
        data, and determine the contour levels.  Returns (x, y, z)."""
        if self.filled:
            fn = 'contourf'
        else:
            fn = 'contour'
        Nargs = len(args)
        if Nargs <= 2:
            # contour(Z[, levels]) form: synthesize X and Y.
            z = ma.asarray(args[0], dtype=np.float64)
            x, y = self._initialize_x_y(z)
            args = args[1:]
        elif Nargs <= 4:
            # contour(X, Y, Z[, levels]) form.
            x, y, z = self._check_xyz(args[:3], kwargs)
            args = args[3:]
        else:
            raise TypeError("Too many arguments to %s; see help(%s)" %
                            (fn, fn))
        z = ma.masked_invalid(z, copy=False)
        self.zmax = ma.maximum(z)
        self.zmin = ma.minimum(z)
        if self.logscale and self.zmin <= 0:
            # Log contouring cannot represent non-positive values; mask
            # them and recompute the minimum from the remaining data.
            z = ma.masked_where(z <= 0, z)
            warnings.warn('Log scale: values of z <= 0 have been masked')
            self.zmin = z.min()
        self._contour_level_args(z, args)
        return (x, y, z)

    def _check_xyz(self, args, kwargs):
        """
        For functions like contour, check that the dimensions
        of the input arrays match; if x and y are 1D, convert
        them to 2D using meshgrid.

        Possible change: I think we should make and use an ArgumentError
        Exception class (here and elsewhere).
        """
        x, y = args[:2]
        # Let the axes apply any registered unit conversions first.
        self.ax._process_unit_info(xdata=x, ydata=y, kwargs=kwargs)
        x = self.ax.convert_xunits(x)
        y = self.ax.convert_yunits(y)

        x = np.asarray(x, dtype=np.float64)
        y = np.asarray(y, dtype=np.float64)
        z = ma.asarray(args[2], dtype=np.float64)

        if z.ndim != 2:
            raise TypeError("Input z must be a 2D array.")
        else:
            Ny, Nx = z.shape

        if x.ndim != y.ndim:
            raise TypeError("Number of dimensions of x and y should match.")

        if x.ndim == 1:
            nx, = x.shape
            ny, = y.shape

            if nx != Nx:
                raise TypeError("Length of x must be number of columns in z.")

            if ny != Ny:
                raise TypeError("Length of y must be number of rows in z.")

            x, y = np.meshgrid(x, y)
        elif x.ndim == 2:
            if x.shape != z.shape:
                raise TypeError("Shape of x does not match that of z: found "
                                "{0} instead of {1}.".format(x.shape,
                                                             z.shape))

            if y.shape != z.shape:
                raise TypeError("Shape of y does not match that of z: found "
                                "{0} instead of {1}.".format(y.shape,
                                                             z.shape))
        else:
            raise TypeError("Inputs x and y must be 1D or 2D.")

        return x, y, z

    def _initialize_x_y(self, z):
        """
        Return X, Y arrays such that contour(Z) will match imshow(Z)
        if origin is not None.

        The center of pixel Z[i,j] depends on origin:
        if origin is None, x = j, y = i;
        if origin is 'lower', x = j + 0.5, y = i + 0.5;
        if origin is 'upper', x = j + 0.5, y = Nrows - i - 0.5

        If extent is not None, x and y will be scaled to match,
        as in imshow.

        If origin is None and extent is not None, then extent
        will give the minimum and maximum values of x and y.
        """
        if z.ndim != 2:
            raise TypeError("Input must be a 2D array.")
        else:
            Ny, Nx = z.shape

        if self.origin is None:  # Not for image-matching.
            if self.extent is None:
                return np.meshgrid(np.arange(Nx), np.arange(Ny))
            else:
                x0, x1, y0, y1 = self.extent
                x = np.linspace(x0, x1, Nx)
                y = np.linspace(y0, y1, Ny)
                return np.meshgrid(x, y)

        # Match image behavior: coordinates are pixel centers.
        if self.extent is None:
            x0, x1, y0, y1 = (0, Nx, 0, Ny)
        else:
            x0, x1, y0, y1 = self.extent

        dx = float(x1 - x0) / Nx
        dy = float(y1 - y0) / Ny
        x = x0 + (np.arange(Nx) + 0.5) * dx
        y = y0 + (np.arange(Ny) + 0.5) * dy
        if self.origin == 'upper':
            # Row 0 at the top, as for images.
            y = y[::-1]
        return np.meshgrid(x, y)

    contour_doc = """
        Plot contours.

        :func:`~matplotlib.pyplot.contour` and
        :func:`~matplotlib.pyplot.contourf` draw contour lines and
        filled contours, respectively.  Except as noted, function
        signatures and return values are the same for both versions.

        :func:`~matplotlib.pyplot.contourf` differs from the MATLAB
        version in that it does not draw the polygon edges.
        To draw edges, add line contours with
        calls to :func:`~matplotlib.pyplot.contour`.

        Call signatures::

          contour(Z)

        make a contour plot of an array *Z*. The level values are chosen
        automatically.

        ::

          contour(X,Y,Z)

        *X*, *Y* specify the (x, y) coordinates of the surface

        ::

          contour(Z,N)
          contour(X,Y,Z,N)

        contour *N* automatically-chosen levels.

        ::

          contour(Z,V)
          contour(X,Y,Z,V)

        draw contour lines at the values specified in sequence *V*

        ::

          contourf(..., V)

        fill the ``len(V)-1`` regions between the values in *V*

        ::

          contour(Z, **kwargs)

        Use keyword args to control colors, linewidth, origin, cmap ... see
        below for more details.

        *X* and *Y* must both be 2-D with the same shape as *Z*, or they
        must both be 1-D such that ``len(X)`` is the number of columns in
        *Z* and ``len(Y)`` is the number of rows in *Z*.

        ``C = contour(...)`` returns a
        :class:`~matplotlib.contour.QuadContourSet` object.

        Optional keyword arguments:

          *colors*: [ *None* | string | (mpl_colors) ]
            If *None*, the colormap specified by cmap will be used.

            If a string, like 'r' or 'red', all levels will be plotted in this
            color.

            If a tuple of matplotlib color args (string, float, rgb, etc),
            different levels will be plotted in different colors in the order
            specified.

          *alpha*: float
            The alpha blending value

          *cmap*: [ *None* | Colormap ]
            A cm :class:`~matplotlib.colors.Colormap` instance or
            *None*. If *cmap* is *None* and *colors* is *None*, a
            default Colormap is used.

          *norm*: [ *None* | Normalize ]
            A :class:`matplotlib.colors.Normalize` instance for
            scaling data values to colors. If *norm* is *None* and
            *colors* is *None*, the default linear scaling is used.

          *vmin*, *vmax*: [ *None* | scalar ]
            If not *None*, either or both of these values will be
            supplied to the :class:`matplotlib.colors.Normalize`
            instance, overriding the default color scaling based on
            *levels*.

          *levels*: [level0, level1, ..., leveln]
            A list of floating point numbers indicating the level
            curves to draw; e.g., to draw just the zero contour pass
            ``levels=[0]``

          *origin*: [ *None* | 'upper' | 'lower' | 'image' ]
            If *None*, the first value of *Z* will correspond to the
            lower left corner, location (0,0). If 'image', the rc
            value for ``image.origin`` will be used.

            This keyword is not active if *X* and *Y* are specified in
            the call to contour.

          *extent*: [ *None* | (x0,x1,y0,y1) ]
            If *origin* is not *None*, then *extent* is interpreted as
            in :func:`matplotlib.pyplot.imshow`: it gives the outer
            pixel boundaries. In this case, the position of Z[0,0]
            is the center of the pixel, not a corner. If *origin* is
            *None*, then (*x0*, *y0*) is the position of Z[0,0], and
            (*x1*, *y1*) is the position of Z[-1,-1].

            This keyword is not active if *X* and *Y* are specified in
            the call to contour.

          *locator*: [ *None* | ticker.Locator subclass ]
            If *locator* is *None*, the default
            :class:`~matplotlib.ticker.MaxNLocator` is used. The
            locator is used to determine the contour levels if they
            are not given explicitly via the *V* argument.

          *extend*: [ 'neither' | 'both' | 'min' | 'max' ]
            Unless this is 'neither', contour levels are automatically
            added to one or both ends of the range so that all data
            are included. These added ranges are then mapped to the
            special colormap values which default to the ends of the
            colormap range, but can be set via
            :meth:`matplotlib.colors.Colormap.set_under` and
            :meth:`matplotlib.colors.Colormap.set_over` methods.

          *xunits*, *yunits*: [ *None* | registered units ]
            Override axis units by specifying an instance of a
            :class:`matplotlib.units.ConversionInterface`.

          *antialiased*: [ *True* | *False* ]
            enable antialiasing, overriding the defaults.  For
            filled contours, the default is *True*.  For line contours,
            it is taken from rcParams['lines.antialiased'].

        contour-only keyword arguments:

          *linewidths*: [ *None* | number | tuple of numbers ]
            If *linewidths* is *None*, the default width in
            ``lines.linewidth`` in ``matplotlibrc`` is used.

            If a number, all levels will be plotted with this linewidth.

            If a tuple, different levels will be plotted with different
            linewidths in the order specified.

          *linestyles*: [ *None* | 'solid' | 'dashed' | 'dashdot' | 'dotted' ]
            If *linestyles* is *None*, the default is 'solid' unless
            the lines are monochrome.  In that case, negative
            contours will take their linestyle from the ``matplotlibrc``
            ``contour.negative_linestyle`` setting.

            *linestyles* can also be an iterable of the above strings
            specifying a set of linestyles to be used. If this
            iterable is shorter than the number of contour levels
            it will be repeated as necessary.

        contourf-only keyword arguments:

          *nchunk*: [ 0 | integer ]
            If 0, no subdivision of the domain.  Specify a positive integer to
            divide the domain into subdomains of roughly *nchunk* by *nchunk*
            points. This may never actually be advantageous, so this option may
            be removed.  Chunking introduces artifacts at the chunk boundaries
            unless *antialiased* is *False*.

          *hatches*:
            A list of cross hatch patterns to use on the filled areas.
            If None, no hatching will be added to the contour.
            Hatching is supported in the PostScript, PDF, SVG and Agg
            backends only.

        Note: contourf fills intervals that are closed at the top; that
        is, for boundaries *z1* and *z2*, the filled region is::

            z1 < z <= z2

        There is one exception: if the lowest boundary coincides with
        the minimum value of the *z* array, then that minimum value
        will be included in the lowest interval.

        **Examples:**

        .. plot:: mpl_examples/pylab_examples/contour_demo.py

        .. plot:: mpl_examples/pylab_examples/contourf_demo.py
        """
| 37.181617 | 79 | 0.561162 |
acf0b099bedf724f43151bbc05bae7e8353ca832 | 8,728 | py | Python | setup.py | sanderisbestok/detectron | 9e8f915bba26d7a6b16066638762a90f18c4c1c9 | [
"Apache-2.0"
] | null | null | null | setup.py | sanderisbestok/detectron | 9e8f915bba26d7a6b16066638762a90f18c4c1c9 | [
"Apache-2.0"
] | null | null | null | setup.py | sanderisbestok/detectron | 9e8f915bba26d7a6b16066638762a90f18c4c1c9 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# Copyright (c) Facebook, Inc. and its affiliates.
import glob
import os
import shutil
from os import path
from setuptools import find_packages, setup
from typing import List
import torch
from torch.utils.cpp_extension import CUDA_HOME, CppExtension, CUDAExtension
from torch.utils.hipify import hipify_python
torch_ver = [int(x) for x in torch.__version__.split(".")[:2]]
assert torch_ver >= [1, 6], "Requires PyTorch >= 1.6"
def get_version():
    """Read detectron2's ``__version__`` from ``detectron2/__init__.py``.

    Release builds may append ``D2_VERSION_SUFFIX``; nightly builds
    (``BUILD_NIGHTLY=1``) append a ``.devYYMMDD`` suffix and rewrite the
    version line in ``__init__.py`` so the built package carries it.

    Returns:
        str: the (possibly suffixed) version string.
    """
    init_py_path = path.join(path.abspath(path.dirname(__file__)), "detectron2", "__init__.py")
    # Use context managers so the file handles are closed deterministically
    # (the original left both the read and write handles to the GC).
    with open(init_py_path, "r") as f:
        init_py = f.readlines()
    version_line = [line.strip() for line in init_py if line.startswith("__version__")][0]
    version = version_line.split("=")[-1].strip().strip("'\"")

    # The following is used to build release packages.
    # Users should never use it.
    suffix = os.getenv("D2_VERSION_SUFFIX", "")
    version = version + suffix
    if os.getenv("BUILD_NIGHTLY", "0") == "1":
        from datetime import datetime

        date_str = datetime.today().strftime("%y%m%d")
        version = version + ".dev" + date_str

        # Persist the nightly version back into __init__.py.
        new_init_py = [line for line in init_py if not line.startswith("__version__")]
        new_init_py.append('__version__ = "{}"\n'.format(version))
        with open(init_py_path, "w") as f:
            f.write("".join(new_init_py))
    return version
def get_extensions():
    """Build the list of C++/CUDA/HIP extension modules (``detectron2._C``).

    Three build flavors are handled: CPU-only (CppExtension), CUDA and
    ROCm/HIP (CUDAExtension).  ROCm goes through torch's hipify machinery;
    hipify versions older than 1.0.0 need an explicit hipification pass.
    """
    this_dir = path.dirname(path.abspath(__file__))
    extensions_dir = path.join(this_dir, "detectron2", "layers", "csrc")
    main_source = path.join(extensions_dir, "vision.cpp")
    sources = glob.glob(path.join(extensions_dir, "**", "*.cpp"))
    # Imported here (not at module top) because ROCM_HOME only exists on
    # sufficiently new torch builds.
    from torch.utils.cpp_extension import ROCM_HOME
    is_rocm_pytorch = (
        True if ((torch.version.hip is not None) and (ROCM_HOME is not None)) else False
    )
    # hipify gained __version__ only recently; treat a missing attribute as 0.0.0.
    hipify_ver = (
        [int(x) for x in torch.utils.hipify.__version__.split(".")]
        if hasattr(torch.utils.hipify, "__version__")
        else [0, 0, 0]
    )
    if is_rocm_pytorch and hipify_ver < [1, 0, 0]: # TODO not needed since pt1.8
        # Earlier versions of hipification and extension modules were not
        # transparent, i.e. would require an explicit call to hipify, and the
        # hipification would introduce "hip" subdirectories, possibly changing
        # the relationship between source and header files.
        # This path is maintained for backwards compatibility.
        hipify_python.hipify(
            project_directory=this_dir,
            output_directory=this_dir,
            includes="/detectron2/layers/csrc/*",
            show_detailed=True,
            is_pytorch_extension=True,
        )
        source_cuda = glob.glob(path.join(extensions_dir, "**", "hip", "*.hip")) + glob.glob(
            path.join(extensions_dir, "hip", "*.hip")
        )
        # Headers are not hipified; copy them next to the generated hip sources.
        shutil.copy(
            "detectron2/layers/csrc/box_iou_rotated/box_iou_rotated_utils.h",
            "detectron2/layers/csrc/box_iou_rotated/hip/box_iou_rotated_utils.h",
        )
        shutil.copy(
            "detectron2/layers/csrc/deformable/deform_conv.h",
            "detectron2/layers/csrc/deformable/hip/deform_conv.h",
        )
        sources = [main_source] + sources
        # On ROCm + torch>=1.7 the hipified vision.cpp would duplicate
        # main_source, so drop it from the list.
        sources = [
            s
            for s in sources
            if not is_rocm_pytorch or torch_ver < [1, 7] or not s.endswith("hip/vision.cpp")
        ]
    else:
        # common code between cuda and rocm platforms,
        # for hipify version [1,0,0] and later.
        source_cuda = glob.glob(path.join(extensions_dir, "**", "*.cu")) + glob.glob(
            path.join(extensions_dir, "*.cu")
        )
        sources = [main_source] + sources
    extension = CppExtension
    extra_compile_args = {"cxx": []}
    define_macros = []
    # FORCE_CUDA=1 lets a CUDA build proceed even when no GPU is visible
    # (e.g. docker image builds).
    if (torch.cuda.is_available() and ((CUDA_HOME is not None) or is_rocm_pytorch)) or os.getenv(
        "FORCE_CUDA", "0"
    ) == "1":
        extension = CUDAExtension
        sources += source_cuda
        if not is_rocm_pytorch:
            define_macros += [("WITH_CUDA", None)]
            extra_compile_args["nvcc"] = [
                "-O3",
                "-DCUDA_HAS_FP16=1",
                "-D__CUDA_NO_HALF_OPERATORS__",
                "-D__CUDA_NO_HALF_CONVERSIONS__",
                "-D__CUDA_NO_HALF2_OPERATORS__",
            ]
        else:
            define_macros += [("WITH_HIP", None)]
            extra_compile_args["nvcc"] = []
        if torch_ver < [1, 7]:
            # supported by https://github.com/pytorch/pytorch/pull/43931
            CC = os.environ.get("CC", None)
            if CC is not None:
                extra_compile_args["nvcc"].append("-ccbin={}".format(CC))
    include_dirs = [extensions_dir]
    ext_modules = [
        extension(
            "detectron2._C",
            sources,
            include_dirs=include_dirs,
            define_macros=define_macros,
            extra_compile_args=extra_compile_args,
        )
    ]
    return ext_modules
def get_model_zoo_configs() -> List[str]:
    """
    Return a list of configs to include in package for model zoo. Copy over these configs inside
    detectron2/model_zoo.
    """
    # Use absolute paths while symlinking.
    repo_root = path.dirname(path.realpath(__file__))
    source_configs_dir = path.join(repo_root, "configs")
    destination = path.join(repo_root, "detectron2", "model_zoo", "configs")

    # Symlink the config directory inside package to have a cleaner pip install.
    # Remove stale symlink/directory from a previous build.
    if path.exists(source_configs_dir):
        if path.islink(destination):
            os.unlink(destination)
        elif path.isdir(destination):
            shutil.rmtree(destination)

    if not path.exists(destination):
        try:
            os.symlink(source_configs_dir, destination)
        except OSError:
            # Fall back to copying if symlink fails: ex. on Windows.
            shutil.copytree(source_configs_dir, destination)

    yaml_configs = glob.glob("configs/**/*.yaml", recursive=True)
    py_configs = glob.glob("configs/**/*.py", recursive=True)
    return yaml_configs + py_configs
# For projects that are relative small and provide features that are very close
# to detectron2's core functionalities, we install them under detectron2.projects
PROJECTS = {
    "detectron2.projects.deeplab": "projects/DeepLab/deeplab",
    "detectron2.projects.panoptic_deeplab": "projects/Panoptic-DeepLab/panoptic_deeplab",
    "detectron2.projects.tridentnet": "projects/TridentNet/tridentnet",
}
# Package metadata, dependency pins and native-extension hookup.
setup(
    name="detectron2",
    version=get_version(),
    author="FAIR",
    url="https://github.com/facebookresearch/detectron2",
    description="Detectron2 is FAIR's next-generation research "
    "platform for object detection and segmentation.",
    packages=find_packages(exclude=("configs", "tests*")) + list(PROJECTS.keys()),
    package_dir=PROJECTS,
    package_data={"detectron2.model_zoo": get_model_zoo_configs()},
    python_requires=">=3.6",
    install_requires=[
        # Do not add opencv here. Just like pytorch, user should install
        # opencv themselves, preferrably by OS's package manager, or by
        # choosing the proper pypi package name at https://github.com/skvark/opencv-python
        "termcolor>=1.1",
        "Pillow>=7.1",  # or use pillow-simd for better performance
        "yacs>=0.1.6",
        "tabulate",
        "cloudpickle",
        "matplotlib",
        "tqdm>4.29.0",
        "tensorboard",
        # Lock version of fvcore/iopath because they may have breaking changes
        # NOTE: when updating fvcore/iopath version, make sure fvcore depends
        # on the same version of iopath.
        "fvcore>=0.1.5,<0.1.6",  # required like this to make it pip installable
        "iopath>=0.1.7,<0.1.9",
        "pycocotools>=2.0.2",  # corresponds to https://github.com/ppwwyyxx/cocoapi
        "future",  # used by caffe2
        "pydot",  # used to save caffe2 SVGs
        "dataclasses; python_version<'3.7'",
        "omegaconf==2.1.0.dev22",
        # When adding to the list, may need to update docs/requirements.txt
        # or add mock in docs/conf.py
    ],
    extras_require={
        "all": [
            "shapely",
            "pygments>=2.2",
            "psutil",
            "hydra-core",
            "panopticapi @ https://github.com/cocodataset/panopticapi/archive/master.zip",
        ],
        "dev": [
            "flake8==3.8.1",
            "isort==4.3.21",
            "black==20.8b1",
            "flake8-bugbear",
            "flake8-comprehensions",
        ],
    },
    ext_modules=get_extensions(),
    cmdclass={"build_ext": torch.utils.cpp_extension.BuildExtension},
)
| 35.193548 | 97 | 0.623052 |
acf0b0ea21d46c58a9b2a11022ee775d09ee552f | 70 | py | Python | constants.py | Kasimir123/chemhelper | 1b089f410c2a5d793226e4b96e08ca7c58e46103 | [
"MIT"
] | null | null | null | constants.py | Kasimir123/chemhelper | 1b089f410c2a5d793226e4b96e08ca7c58e46103 | [
"MIT"
] | null | null | null | constants.py | Kasimir123/chemhelper | 1b089f410c2a5d793226e4b96e08ca7c58e46103 | [
"MIT"
] | null | null | null | # File for all of the constants
# Avogadro constant in mol^-1 — exact by definition since the 2019 SI
# redefinition (previously truncated here to 6.022e23).
AVOGADRO_CONSTANT = 6.02214076e23
acf0b18737409cae2cae2bb2bc04567c94481f35 | 46,009 | py | Python | nova/tests/test_db_api.py | 781778304/nova | 05aff1959c9f94dae095635133386418390efb37 | [
"Apache-2.0"
] | null | null | null | nova/tests/test_db_api.py | 781778304/nova | 05aff1959c9f94dae095635133386418390efb37 | [
"Apache-2.0"
] | null | null | null | nova/tests/test_db_api.py | 781778304/nova | 05aff1959c9f94dae095635133386418390efb37 | [
"Apache-2.0"
] | null | null | null | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Unit tests for the DB API"""
import datetime
from nova import context
from nova import db
from nova import exception
from nova import flags
from nova.openstack.common import timeutils
from nova import test
from nova import utils
FLAGS = flags.FLAGS
def _setup_networking(instance_id, ip='1.2.3.4', flo_addr='1.2.1.2'):
    """Wire an instance up with a network, vif, fixed IP and floating IP."""
    ctxt = context.get_admin_context()
    network = db.project_get_networks(ctxt,
                                      'fake',
                                      associate=True)[0]
    vif_ref = db.virtual_interface_create(ctxt,
                                          {'address': '56:12:12:12:12:12',
                                           'network_id': network['id'],
                                           'instance_id': instance_id})
    db.fixed_ip_create(ctxt,
                       {'address': ip,
                        'network_id': network['id'],
                        'virtual_interface_id': vif_ref['id'],
                        'allocated': True,
                        'instance_id': instance_id})
    fixed_ref = db.fixed_ip_get_by_address(ctxt, ip)
    db.floating_ip_create(ctxt,
                          {'address': flo_addr,
                           'fixed_ip_id': fixed_ref['id']})
class DbApiTestCase(test.TestCase):
    """Exercise the DB API: instances, migrations, networks, fixed IPs,
    instance faults, DNS domains and EC2 id mappings."""
    def setUp(self):
        super(DbApiTestCase, self).setUp()
        self.user_id = 'fake'
        self.project_id = 'fake'
        self.context = context.RequestContext(self.user_id, self.project_id)
    def test_instance_get_all_by_filters(self):
        args = {'reservation_id': 'a', 'image_ref': 1, 'host': 'host1'}
        db.instance_create(self.context, args)
        db.instance_create(self.context, args)
        result = db.instance_get_all_by_filters(self.context, {})
        # Fixed: assertTrue(2, len(result)) always passed because its first
        # argument (2) is truthy; the count was never actually checked.
        self.assertEqual(2, len(result))
    def test_instance_get_all_by_filters_deleted(self):
        args1 = {'reservation_id': 'a', 'image_ref': 1, 'host': 'host1'}
        inst1 = db.instance_create(self.context, args1)
        args2 = {'reservation_id': 'b', 'image_ref': 1, 'host': 'host1'}
        inst2 = db.instance_create(self.context, args2)
        db.instance_destroy(self.context.elevated(), inst1['uuid'])
        result = db.instance_get_all_by_filters(self.context.elevated(), {})
        self.assertEqual(2, len(result))
        self.assertIn(inst1.id, [result[0].id, result[1].id])
        self.assertIn(inst2.id, [result[0].id, result[1].id])
        if inst1.id == result[0].id:
            self.assertTrue(result[0].deleted)
        else:
            self.assertTrue(result[1].deleted)
    def test_migration_get_all_unconfirmed(self):
        ctxt = context.get_admin_context()
        # Ensure no migrations are returned.
        results = db.migration_get_all_unconfirmed(ctxt, 10)
        self.assertEqual(0, len(results))
        # Ensure one migration older than 10 seconds is returned.
        # (Literals normalized from Python-2-only 01/00 forms; same values.)
        updated_at = datetime.datetime(2000, 1, 1, 12, 0, 0)
        values = {"status": "finished", "updated_at": updated_at}
        migration = db.migration_create(ctxt, values)
        results = db.migration_get_all_unconfirmed(ctxt, 10)
        self.assertEqual(1, len(results))
        db.migration_update(ctxt, migration.id, {"status": "CONFIRMED"})
        # Ensure the new migration is not returned.
        updated_at = timeutils.utcnow()
        values = {"status": "finished", "updated_at": updated_at}
        migration = db.migration_create(ctxt, values)
        results = db.migration_get_all_unconfirmed(ctxt, 10)
        self.assertEqual(0, len(results))
        db.migration_update(ctxt, migration.id, {"status": "CONFIRMED"})
    def test_instance_get_all_hung_in_rebooting(self):
        ctxt = context.get_admin_context()
        # Ensure no instances are returned.
        results = db.instance_get_all_hung_in_rebooting(ctxt, 10)
        self.assertEqual(0, len(results))
        # Ensure one rebooting instance with updated_at older than 10 seconds
        # is returned.
        updated_at = datetime.datetime(2000, 1, 1, 12, 0, 0)
        values = {"task_state": "rebooting", "updated_at": updated_at}
        instance = db.instance_create(ctxt, values)
        results = db.instance_get_all_hung_in_rebooting(ctxt, 10)
        self.assertEqual(1, len(results))
        db.instance_update(ctxt, instance['uuid'], {"task_state": None})
        # Ensure the newly rebooted instance is not returned.
        updated_at = timeutils.utcnow()
        values = {"task_state": "rebooting", "updated_at": updated_at}
        instance = db.instance_create(ctxt, values)
        results = db.instance_get_all_hung_in_rebooting(ctxt, 10)
        self.assertEqual(0, len(results))
        db.instance_update(ctxt, instance['uuid'], {"task_state": None})
    def test_network_create_safe(self):
        ctxt = context.get_admin_context()
        values = {'host': 'localhost', 'project_id': 'project1'}
        network = db.network_create_safe(ctxt, values)
        self.assertNotEqual(None, network.uuid)
        self.assertEqual(36, len(network.uuid))
        db_network = db.network_get(ctxt, network.id)
        self.assertEqual(network.uuid, db_network.uuid)
    def test_network_delete_safe(self):
        ctxt = context.get_admin_context()
        values = {'host': 'localhost', 'project_id': 'project1'}
        network = db.network_create_safe(ctxt, values)
        db_network = db.network_get(ctxt, network.id)
        values = {'network_id': network['id'], 'address': 'fake1'}
        address1 = db.fixed_ip_create(ctxt, values)
        values = {'network_id': network['id'],
                  'address': 'fake2',
                  'allocated': True}
        address2 = db.fixed_ip_create(ctxt, values)
        # Allocated fixed IPs must block deletion.
        self.assertRaises(exception.NetworkInUse,
                          db.network_delete_safe, ctxt, network['id'])
        db.fixed_ip_update(ctxt, address2, {'allocated': False})
        network = db.network_delete_safe(ctxt, network['id'])
        self.assertRaises(exception.FixedIpNotFoundForAddress,
                          db.fixed_ip_get_by_address, ctxt, address1)
        ctxt = ctxt.elevated(read_deleted='yes')
        fixed_ip = db.fixed_ip_get_by_address(ctxt, address1)
        self.assertTrue(fixed_ip['deleted'])
    def test_network_create_with_duplicate_vlan(self):
        ctxt = context.get_admin_context()
        values1 = {'host': 'localhost', 'project_id': 'project1', 'vlan': 1}
        values2 = {'host': 'something', 'project_id': 'project1', 'vlan': 1}
        db.network_create_safe(ctxt, values1)
        self.assertRaises(exception.DuplicateVlan,
                          db.network_create_safe, ctxt, values2)
    def test_instance_update_with_instance_id(self):
        """Test instance_update() works when an instance id is passed."""
        ctxt = context.get_admin_context()
        # Create an instance with some metadata
        values = {'metadata': {'host': 'foo'},
                  'system_metadata': {'original_image_ref': 'blah'}}
        instance = db.instance_create(ctxt, values)
        # Update the metadata
        values = {'metadata': {'host': 'bar'},
                  'system_metadata': {'original_image_ref': 'baz'}}
        db.instance_update(ctxt, instance['uuid'], values)
        # Retrieve the user-provided metadata to ensure it was successfully
        # updated
        instance_meta = db.instance_metadata_get(ctxt, instance.uuid)
        self.assertEqual('bar', instance_meta['host'])
        # Retrieve the system metadata to ensure it was successfully updated
        system_meta = db.instance_system_metadata_get(ctxt, instance.uuid)
        self.assertEqual('baz', system_meta['original_image_ref'])
    def test_instance_update_with_instance_uuid(self):
        """Test instance_update() works when an instance UUID is passed."""
        ctxt = context.get_admin_context()
        # Create an instance with some metadata
        values = {'metadata': {'host': 'foo'},
                  'system_metadata': {'original_image_ref': 'blah'}}
        instance = db.instance_create(ctxt, values)
        # Update the metadata
        values = {'metadata': {'host': 'bar'},
                  'system_metadata': {'original_image_ref': 'baz'}}
        db.instance_update(ctxt, instance['uuid'], values)
        # Retrieve the user-provided metadata to ensure it was successfully
        # updated
        instance_meta = db.instance_metadata_get(ctxt, instance.uuid)
        self.assertEqual('bar', instance_meta['host'])
        # Retrieve the system metadata to ensure it was successfully updated
        system_meta = db.instance_system_metadata_get(ctxt, instance.uuid)
        self.assertEqual('baz', system_meta['original_image_ref'])
    def test_instance_update_with_and_get_original(self):
        ctxt = context.get_admin_context()
        # Create an instance with some metadata
        values = {'vm_state': 'building'}
        instance = db.instance_create(ctxt, values)
        (old_ref, new_ref) = db.instance_update_and_get_original(ctxt,
                instance['uuid'], {'vm_state': 'needscoffee'})
        self.assertEquals("building", old_ref["vm_state"])
        self.assertEquals("needscoffee", new_ref["vm_state"])
    def test_instance_fault_create(self):
        """Ensure we can create an instance fault."""
        ctxt = context.get_admin_context()
        uuid = str(utils.gen_uuid())
        # Create a fault
        fault_values = {
            'message': 'message',
            'details': 'detail',
            'instance_uuid': uuid,
            'code': 404,
        }
        db.instance_fault_create(ctxt, fault_values)
        # Retrieve the fault to ensure it was successfully added
        faults = db.instance_fault_get_by_instance_uuids(ctxt, [uuid])
        self.assertEqual(404, faults[uuid][0]['code'])
    def test_instance_fault_get_by_instance(self):
        """Ensure we can retrieve an instance fault by instance UUID."""
        ctxt = context.get_admin_context()
        instance1 = db.instance_create(ctxt, {})
        instance2 = db.instance_create(ctxt, {})
        uuids = [instance1['uuid'], instance2['uuid']]
        # Create faults
        fault_values = {
            'message': 'message',
            'details': 'detail',
            'instance_uuid': uuids[0],
            'code': 404,
        }
        fault1 = db.instance_fault_create(ctxt, fault_values)
        fault_values = {
            'message': 'message',
            'details': 'detail',
            'instance_uuid': uuids[0],
            'code': 500,
        }
        fault2 = db.instance_fault_create(ctxt, fault_values)
        fault_values = {
            'message': 'message',
            'details': 'detail',
            'instance_uuid': uuids[1],
            'code': 404,
        }
        fault3 = db.instance_fault_create(ctxt, fault_values)
        fault_values = {
            'message': 'message',
            'details': 'detail',
            'instance_uuid': uuids[1],
            'code': 500,
        }
        fault4 = db.instance_fault_create(ctxt, fault_values)
        instance_faults = db.instance_fault_get_by_instance_uuids(ctxt, uuids)
        # Faults are returned newest-first per instance.
        expected = {
            uuids[0]: [fault2, fault1],
            uuids[1]: [fault4, fault3],
        }
        self.assertEqual(instance_faults, expected)
    def test_instance_faults_get_by_instance_uuids_no_faults(self):
        """Empty fault lists should be returned when no faults exist."""
        ctxt = context.get_admin_context()
        instance1 = db.instance_create(ctxt, {})
        instance2 = db.instance_create(ctxt, {})
        uuids = [instance1['uuid'], instance2['uuid']]
        instance_faults = db.instance_fault_get_by_instance_uuids(ctxt, uuids)
        expected = {uuids[0]: [], uuids[1]: []}
        self.assertEqual(expected, instance_faults)
    def test_dns_registration(self):
        domain1 = 'test.domain.one'
        domain2 = 'test.domain.two'
        testzone = 'testzone'
        ctxt = context.get_admin_context()
        db.dnsdomain_register_for_zone(ctxt, domain1, testzone)
        domain_ref = db.dnsdomain_get(ctxt, domain1)
        zone = domain_ref.availability_zone
        scope = domain_ref.scope
        self.assertEqual(scope, 'private')
        self.assertEqual(zone, testzone)
        db.dnsdomain_register_for_project(ctxt, domain2,
                                          self.project_id)
        domain_ref = db.dnsdomain_get(ctxt, domain2)
        project = domain_ref.project_id
        scope = domain_ref.scope
        self.assertEqual(project, self.project_id)
        self.assertEqual(scope, 'public')
        db.dnsdomain_unregister(ctxt, domain1)
        db.dnsdomain_unregister(ctxt, domain2)
    def test_network_get_associated_fixed_ips(self):
        ctxt = context.get_admin_context()
        values = {'host': 'foo', 'hostname': 'myname'}
        instance = db.instance_create(ctxt, values)
        values = {'address': 'bar', 'instance_id': instance['id']}
        vif = db.virtual_interface_create(ctxt, values)
        values = {'address': 'baz',
                  'network_id': 1,
                  'allocated': True,
                  'instance_id': instance['id'],
                  'virtual_interface_id': vif['id']}
        fixed_address = db.fixed_ip_create(ctxt, values)
        data = db.network_get_associated_fixed_ips(ctxt, 1)
        self.assertEqual(len(data), 1)
        record = data[0]
        self.assertEqual(record['address'], fixed_address)
        self.assertEqual(record['instance_id'], instance['id'])
        self.assertEqual(record['network_id'], 1)
        self.assertEqual(record['instance_created'], instance['created_at'])
        self.assertEqual(record['instance_updated'], instance['updated_at'])
        self.assertEqual(record['instance_hostname'], instance['hostname'])
        self.assertEqual(record['vif_id'], vif['id'])
        self.assertEqual(record['vif_address'], vif['address'])
        data = db.network_get_associated_fixed_ips(ctxt, 1, 'nothing')
        self.assertEqual(len(data), 0)
    def _timeout_test(self, ctxt, timeout, multi_host):
        # Seed one fixed IP in each disassociation state relative to
        # `timeout`: eligible, still allocated, wrong network, too new.
        values = {'host': 'foo'}
        instance = db.instance_create(ctxt, values)
        values = {'multi_host': multi_host, 'host': 'bar'}
        net = db.network_create_safe(ctxt, values)
        old = time = timeout - datetime.timedelta(seconds=5)
        new = time = timeout + datetime.timedelta(seconds=5)
        # should deallocate
        values = {'allocated': False,
                  'instance_id': instance['id'],
                  'network_id': net['id'],
                  'updated_at': old}
        db.fixed_ip_create(ctxt, values)
        # still allocated
        values = {'allocated': True,
                  'instance_id': instance['id'],
                  'network_id': net['id'],
                  'updated_at': old}
        db.fixed_ip_create(ctxt, values)
        # wrong network
        values = {'allocated': False,
                  'instance_id': instance['id'],
                  'network_id': None,
                  'updated_at': old}
        db.fixed_ip_create(ctxt, values)
        # too new
        values = {'allocated': False,
                  'instance_id': instance['id'],
                  'network_id': None,
                  'updated_at': new}
        db.fixed_ip_create(ctxt, values)
    def test_fixed_ip_disassociate_all_by_timeout_single_host(self):
        now = timeutils.utcnow()
        ctxt = context.get_admin_context()
        self._timeout_test(ctxt, now, False)
        result = db.fixed_ip_disassociate_all_by_timeout(ctxt, 'foo', now)
        self.assertEqual(result, 0)
        result = db.fixed_ip_disassociate_all_by_timeout(ctxt, 'bar', now)
        self.assertEqual(result, 1)
    def test_fixed_ip_disassociate_all_by_timeout_multi_host(self):
        now = timeutils.utcnow()
        ctxt = context.get_admin_context()
        self._timeout_test(ctxt, now, True)
        result = db.fixed_ip_disassociate_all_by_timeout(ctxt, 'foo', now)
        self.assertEqual(result, 1)
        result = db.fixed_ip_disassociate_all_by_timeout(ctxt, 'bar', now)
        self.assertEqual(result, 0)
    def test_get_vol_mapping_non_admin(self):
        ref = db.ec2_volume_create(self.context, 'fake-uuid')
        ec2_id = db.get_ec2_volume_id_by_uuid(self.context, 'fake-uuid')
        self.assertEqual(ref['id'], ec2_id)
    def test_get_snap_mapping_non_admin(self):
        ref = db.ec2_snapshot_create(self.context, 'fake-uuid')
        ec2_id = db.get_ec2_snapshot_id_by_uuid(self.context, 'fake-uuid')
        self.assertEqual(ref['id'], ec2_id)
def _get_fake_aggr_values():
return {'name': 'fake_aggregate',
'availability_zone': 'fake_avail_zone', }
def _get_fake_aggr_metadata():
return {'fake_key1': 'fake_value1',
'fake_key2': 'fake_value2'}
def _get_fake_aggr_hosts():
return ['foo.openstack.org']
# Sentinel that distinguishes "use a fresh default fixture" from an explicit
# None — several tests deliberately pass metadata=None.
_AGGR_UNSET = object()


def _create_aggregate(context=context.get_admin_context(),
                      values=_AGGR_UNSET,
                      metadata=_AGGR_UNSET):
    """Create a test aggregate.

    The previous defaults (``values=_get_fake_aggr_values()`` etc.) were
    evaluated once at import time, so every call shared — and could see
    mutations of — the same dicts.  Fresh fixtures are now built per call.
    """
    if values is _AGGR_UNSET:
        values = _get_fake_aggr_values()
    if metadata is _AGGR_UNSET:
        metadata = _get_fake_aggr_metadata()
    return db.aggregate_create(context, values, metadata)
# Sentinel for "use a fresh default fixture" (explicit None stays meaningful).
_AGGR_HOSTS_UNSET = object()


def _create_aggregate_with_hosts(context=context.get_admin_context(),
                                 values=_AGGR_HOSTS_UNSET,
                                 metadata=_AGGR_HOSTS_UNSET,
                                 hosts=_AGGR_HOSTS_UNSET):
    """Create a test aggregate and add hosts to it.

    Default fixtures are built per call rather than once at import time, so
    calls no longer share (mutable) default dicts/lists.
    """
    if values is _AGGR_HOSTS_UNSET:
        values = _get_fake_aggr_values()
    if metadata is _AGGR_HOSTS_UNSET:
        metadata = _get_fake_aggr_metadata()
    if hosts is _AGGR_HOSTS_UNSET:
        hosts = _get_fake_aggr_hosts()
    result = _create_aggregate(context=context,
                               values=values, metadata=metadata)
    for host in hosts:
        db.aggregate_host_add(context, result.id, host)
    return result
class AggregateDBApiTestCase(test.TestCase):
def setUp(self):
super(AggregateDBApiTestCase, self).setUp()
self.user_id = 'fake'
self.project_id = 'fake'
self.context = context.RequestContext(self.user_id, self.project_id)
def test_aggregate_create(self):
"""Ensure aggregate can be created with no metadata."""
result = _create_aggregate(metadata=None)
self.assertEquals(result.name, 'fake_aggregate')
def test_aggregate_create_avoid_name_conflict(self):
"""Test we can avoid conflict on deleted aggregates."""
r1 = _create_aggregate(metadata=None)
db.aggregate_delete(context.get_admin_context(), r1.id)
values = {'name': r1.name, 'availability_zone': 'new_zone'}
r2 = _create_aggregate(values=values)
self.assertEqual(r2.name, values['name'])
self.assertEqual(r2.availability_zone, values['availability_zone'])
def test_aggregate_create_raise_exist_exc(self):
"""Ensure aggregate names are distinct."""
_create_aggregate(metadata=None)
self.assertRaises(exception.AggregateNameExists,
_create_aggregate, metadata=None)
def test_aggregate_get_raise_not_found(self):
"""Ensure AggregateNotFound is raised when getting an aggregate."""
ctxt = context.get_admin_context()
# this does not exist!
aggregate_id = 1
self.assertRaises(exception.AggregateNotFound,
db.aggregate_get,
ctxt, aggregate_id)
def test_aggregate_metadata_get_raise_not_found(self):
"""Ensure AggregateNotFound is raised when getting metadata."""
ctxt = context.get_admin_context()
# this does not exist!
aggregate_id = 1
self.assertRaises(exception.AggregateNotFound,
db.aggregate_metadata_get,
ctxt, aggregate_id)
def test_aggregate_create_with_metadata(self):
"""Ensure aggregate can be created with metadata."""
ctxt = context.get_admin_context()
result = _create_aggregate(context=ctxt)
expected_metadata = db.aggregate_metadata_get(ctxt, result['id'])
self.assertDictMatch(expected_metadata, _get_fake_aggr_metadata())
def test_aggregate_create_low_privi_context(self):
"""Ensure right context is applied when creating aggregate."""
self.assertRaises(exception.AdminRequired,
db.aggregate_create,
self.context, _get_fake_aggr_values())
def test_aggregate_get(self):
"""Ensure we can get aggregate with all its relations."""
ctxt = context.get_admin_context()
result = _create_aggregate_with_hosts(context=ctxt)
expected = db.aggregate_get(ctxt, result.id)
self.assertEqual(_get_fake_aggr_hosts(), expected.hosts)
self.assertEqual(_get_fake_aggr_metadata(), expected.metadetails)
def test_aggregate_get_by_host(self):
"""Ensure we can get an aggregate by host."""
ctxt = context.get_admin_context()
r1 = _create_aggregate_with_hosts(context=ctxt)
r2 = db.aggregate_get_by_host(ctxt, 'foo.openstack.org')
self.assertEqual(r1.id, r2.id)
def test_aggregate_get_by_host_not_found(self):
"""Ensure AggregateHostNotFound is raised with unknown host."""
ctxt = context.get_admin_context()
_create_aggregate_with_hosts(context=ctxt)
self.assertRaises(exception.AggregateHostNotFound,
db.aggregate_get_by_host, ctxt, 'unknown_host')
def test_aggregate_delete_raise_not_found(self):
"""Ensure AggregateNotFound is raised when deleting an aggregate."""
ctxt = context.get_admin_context()
# this does not exist!
aggregate_id = 1
self.assertRaises(exception.AggregateNotFound,
db.aggregate_delete,
ctxt, aggregate_id)
def test_aggregate_delete(self):
"""Ensure we can delete an aggregate."""
ctxt = context.get_admin_context()
result = _create_aggregate(context=ctxt, metadata=None)
db.aggregate_delete(ctxt, result['id'])
expected = db.aggregate_get_all(ctxt)
self.assertEqual(0, len(expected))
aggregate = db.aggregate_get(ctxt.elevated(read_deleted='yes'),
result['id'])
self.assertEqual(aggregate.deleted, True)
def test_aggregate_update(self):
"""Ensure an aggregate can be updated."""
ctxt = context.get_admin_context()
result = _create_aggregate(context=ctxt, metadata=None)
new_values = _get_fake_aggr_values()
new_values['availability_zone'] = 'different_avail_zone'
updated = db.aggregate_update(ctxt, 1, new_values)
self.assertNotEqual(result.availability_zone,
updated.availability_zone)
def test_aggregate_update_with_metadata(self):
"""Ensure an aggregate can be updated with metadata."""
ctxt = context.get_admin_context()
result = _create_aggregate(context=ctxt, metadata=None)
values = _get_fake_aggr_values()
values['metadata'] = _get_fake_aggr_metadata()
db.aggregate_update(ctxt, 1, values)
expected = db.aggregate_metadata_get(ctxt, result.id)
self.assertDictMatch(_get_fake_aggr_metadata(), expected)
def test_aggregate_update_with_existing_metadata(self):
"""Ensure an aggregate can be updated with existing metadata."""
ctxt = context.get_admin_context()
result = _create_aggregate(context=ctxt)
values = _get_fake_aggr_values()
values['metadata'] = _get_fake_aggr_metadata()
values['metadata']['fake_key1'] = 'foo'
db.aggregate_update(ctxt, 1, values)
expected = db.aggregate_metadata_get(ctxt, result.id)
self.assertDictMatch(values['metadata'], expected)
def test_aggregate_update_raise_not_found(self):
"""Ensure AggregateNotFound is raised when updating an aggregate."""
ctxt = context.get_admin_context()
# this does not exist!
aggregate_id = 1
new_values = _get_fake_aggr_values()
self.assertRaises(exception.AggregateNotFound,
db.aggregate_update, ctxt, aggregate_id, new_values)
def test_aggregate_get_all(self):
"""Ensure we can get all aggregates."""
ctxt = context.get_admin_context()
counter = 3
for c in xrange(counter):
_create_aggregate(context=ctxt,
values={'name': 'fake_aggregate_%d' % c,
'availability_zone': 'fake_avail_zone'},
metadata=None)
results = db.aggregate_get_all(ctxt)
self.assertEqual(len(results), counter)
def test_aggregate_get_all_non_deleted(self):
"""Ensure we get only non-deleted aggregates."""
ctxt = context.get_admin_context()
add_counter = 5
remove_counter = 2
aggregates = []
for c in xrange(1, add_counter):
values = {'name': 'fake_aggregate_%d' % c,
'availability_zone': 'fake_avail_zone'}
aggregates.append(_create_aggregate(context=ctxt,
values=values, metadata=None))
for c in xrange(1, remove_counter):
db.aggregate_delete(ctxt, aggregates[c - 1].id)
results = db.aggregate_get_all(ctxt)
self.assertEqual(len(results), add_counter - remove_counter)
def test_aggregate_metadata_add(self):
"""Ensure we can add metadata for the aggregate."""
ctxt = context.get_admin_context()
result = _create_aggregate(context=ctxt, metadata=None)
metadata = _get_fake_aggr_metadata()
db.aggregate_metadata_add(ctxt, result.id, metadata)
expected = db.aggregate_metadata_get(ctxt, result.id)
self.assertDictMatch(metadata, expected)
def test_aggregate_metadata_update(self):
"""Ensure we can update metadata for the aggregate."""
ctxt = context.get_admin_context()
result = _create_aggregate(context=ctxt)
metadata = _get_fake_aggr_metadata()
key = metadata.keys()[0]
db.aggregate_metadata_delete(ctxt, result.id, key)
new_metadata = {key: 'foo'}
db.aggregate_metadata_add(ctxt, result.id, new_metadata)
expected = db.aggregate_metadata_get(ctxt, result.id)
metadata[key] = 'foo'
self.assertDictMatch(metadata, expected)
def test_aggregate_metadata_delete(self):
"""Ensure we can delete metadata for the aggregate."""
ctxt = context.get_admin_context()
result = _create_aggregate(context=ctxt, metadata=None)
metadata = _get_fake_aggr_metadata()
db.aggregate_metadata_add(ctxt, result.id, metadata)
db.aggregate_metadata_delete(ctxt, result.id, metadata.keys()[0])
expected = db.aggregate_metadata_get(ctxt, result.id)
del metadata[metadata.keys()[0]]
self.assertDictMatch(metadata, expected)
def test_aggregate_metadata_delete_raise_not_found(self):
"""Ensure AggregateMetadataNotFound is raised when deleting."""
ctxt = context.get_admin_context()
result = _create_aggregate(context=ctxt)
self.assertRaises(exception.AggregateMetadataNotFound,
db.aggregate_metadata_delete,
ctxt, result.id, 'foo_key')
def test_aggregate_host_add(self):
"""Ensure we can add host to the aggregate."""
ctxt = context.get_admin_context()
result = _create_aggregate_with_hosts(context=ctxt, metadata=None)
expected = db.aggregate_host_get_all(ctxt, result.id)
self.assertEqual(_get_fake_aggr_hosts(), expected)
def test_aggregate_host_add_deleted(self):
"""Ensure we can add a host that was previously deleted."""
ctxt = context.get_admin_context()
result = _create_aggregate_with_hosts(context=ctxt, metadata=None)
host = _get_fake_aggr_hosts()[0]
db.aggregate_host_delete(ctxt, result.id, host)
db.aggregate_host_add(ctxt, result.id, host)
expected = db.aggregate_host_get_all(ctxt, result.id)
self.assertEqual(len(expected), 1)
def test_aggregate_host_add_duplicate_works(self):
"""Ensure we can add host to distinct aggregates."""
ctxt = context.get_admin_context()
r1 = _create_aggregate_with_hosts(context=ctxt, metadata=None)
r2 = _create_aggregate_with_hosts(ctxt,
values={'name': 'fake_aggregate2',
'availability_zone': 'fake_avail_zone2', },
metadata=None)
h1 = db.aggregate_host_get_all(ctxt, r1.id)
h2 = db.aggregate_host_get_all(ctxt, r2.id)
self.assertEqual(h1, h2)
def test_aggregate_host_add_duplicate_raise_exist_exc(self):
"""Ensure we cannot add host to the same aggregate."""
ctxt = context.get_admin_context()
result = _create_aggregate_with_hosts(context=ctxt, metadata=None)
self.assertRaises(exception.AggregateHostExists,
db.aggregate_host_add,
ctxt, result.id, _get_fake_aggr_hosts()[0])
    def test_aggregate_host_add_raise_not_found(self):
        """Ensure AggregateNotFound is raised when adding a host to a
        non-existent aggregate."""
        ctxt = context.get_admin_context()
        # this does not exist!
        aggregate_id = 1
        host = _get_fake_aggr_hosts()[0]
        self.assertRaises(exception.AggregateNotFound,
                          db.aggregate_host_add,
                          ctxt, aggregate_id, host)
    def test_aggregate_host_delete(self):
        """Ensure we can delete a host from the aggregate."""
        ctxt = context.get_admin_context()
        result = _create_aggregate_with_hosts(context=ctxt, metadata=None)
        db.aggregate_host_delete(ctxt, result.id,
                                 _get_fake_aggr_hosts()[0])
        expected = db.aggregate_host_get_all(ctxt, result.id)
        # After removing the only host, the aggregate has no hosts left.
        self.assertEqual(0, len(expected))
def test_aggregate_host_delete_raise_not_found(self):
"""Ensure AggregateHostNotFound is raised when deleting a host."""
ctxt = context.get_admin_context()
result = _create_aggregate(context=ctxt)
self.assertRaises(exception.AggregateHostNotFound,
db.aggregate_host_delete,
ctxt, result.id, _get_fake_aggr_hosts()[0])
class CapacityTestCase(test.TestCase):
    """Tests for the compute-node capacity tracking DB API.

    Uses the modern ``assertEqual`` throughout for consistency with the
    rest of this file (``assertEquals`` is a deprecated alias).
    """

    def setUp(self):
        super(CapacityTestCase, self).setUp()
        self.ctxt = context.get_admin_context()
        service_dict = dict(host='host1', binary='binary1',
                            topic='compute', report_count=1,
                            disabled=False)
        self.service = db.service_create(self.ctxt, service_dict)
        # Template for compute_node_create; 'host' is filled per test.
        self.compute_node_dict = dict(vcpus=2, memory_mb=1024, local_gb=2048,
                                      vcpus_used=0, memory_mb_used=0,
                                      local_gb_used=0, hypervisor_type="xen",
                                      hypervisor_version=1, cpu_info="",
                                      service_id=self.service.id)
        # Zero out reservations so free_* equal the raw totals by default.
        self.flags(reserved_host_memory_mb=0)
        self.flags(reserved_host_disk_mb=0)

    def _create_helper(self, host):
        """Create a compute node for `host` from the template dict."""
        self.compute_node_dict['host'] = host
        return db.compute_node_create(self.ctxt, self.compute_node_dict)

    def test_compute_node_create(self):
        item = self._create_helper('host1')
        self.assertEqual(item.free_ram_mb, 1024)
        self.assertEqual(item.free_disk_gb, 2048)
        self.assertEqual(item.running_vms, 0)
        self.assertEqual(item.current_workload, 0)

    def test_compute_node_create_with_reservations(self):
        # Reserved host memory is deducted from free RAM at creation time.
        self.flags(reserved_host_memory_mb=256)
        item = self._create_helper('host1')
        self.assertEqual(item.free_ram_mb, 1024 - 256)

    def test_compute_node_set(self):
        self._create_helper('host1')
        x = db.compute_node_utilization_set(self.ctxt, 'host1',
                                            free_ram_mb=2048, free_disk_gb=4096)
        self.assertEqual(x.free_ram_mb, 2048)
        self.assertEqual(x.free_disk_gb, 4096)
        self.assertEqual(x.running_vms, 0)
        self.assertEqual(x.current_workload, 0)
        # Each subsequent set only overrides the named field.
        x = db.compute_node_utilization_set(self.ctxt, 'host1', work=3)
        self.assertEqual(x.free_ram_mb, 2048)
        self.assertEqual(x.free_disk_gb, 4096)
        self.assertEqual(x.current_workload, 3)
        self.assertEqual(x.running_vms, 0)
        x = db.compute_node_utilization_set(self.ctxt, 'host1', vms=5)
        self.assertEqual(x.free_ram_mb, 2048)
        self.assertEqual(x.free_disk_gb, 4096)
        self.assertEqual(x.current_workload, 3)
        self.assertEqual(x.running_vms, 5)

    def test_compute_node_utilization_update(self):
        self._create_helper('host1')
        # Deltas are applied on top of the values set at creation time
        # (free_ram_mb=1024, free_disk_gb=2048, work=0, vms=0).
        x = db.compute_node_utilization_update(self.ctxt, 'host1',
                                               free_ram_mb_delta=-24)
        self.assertEqual(x.free_ram_mb, 1000)
        self.assertEqual(x.free_disk_gb, 2048)
        self.assertEqual(x.running_vms, 0)
        self.assertEqual(x.current_workload, 0)
        x = db.compute_node_utilization_update(self.ctxt, 'host1',
                                               free_disk_gb_delta=-48)
        self.assertEqual(x.free_ram_mb, 1000)
        self.assertEqual(x.free_disk_gb, 2000)
        self.assertEqual(x.running_vms, 0)
        self.assertEqual(x.current_workload, 0)
        x = db.compute_node_utilization_update(self.ctxt, 'host1',
                                               work_delta=3)
        self.assertEqual(x.free_ram_mb, 1000)
        self.assertEqual(x.free_disk_gb, 2000)
        self.assertEqual(x.current_workload, 3)
        self.assertEqual(x.running_vms, 0)
        x = db.compute_node_utilization_update(self.ctxt, 'host1',
                                               work_delta=-1)
        self.assertEqual(x.free_ram_mb, 1000)
        self.assertEqual(x.free_disk_gb, 2000)
        self.assertEqual(x.current_workload, 2)
        self.assertEqual(x.running_vms, 0)
        x = db.compute_node_utilization_update(self.ctxt, 'host1',
                                               vm_delta=5)
        self.assertEqual(x.free_ram_mb, 1000)
        self.assertEqual(x.free_disk_gb, 2000)
        self.assertEqual(x.current_workload, 2)
        self.assertEqual(x.running_vms, 5)
class TestIpAllocation(test.TestCase):
    """Tests for fixed-IP association behaviour of the DB API."""

    def setUp(self):
        super(TestIpAllocation, self).setUp()
        self.ctxt = context.get_admin_context()
        self.instance = db.instance_create(self.ctxt, {})
        self.network = db.network_create_safe(self.ctxt, {})

    def create_fixed_ip(self, **params):
        """Create a fixed IP (default address) and return its address."""
        fixed_ip_values = {'address': '192.168.0.1'}
        fixed_ip_values.update(params)
        return db.fixed_ip_create(self.ctxt, fixed_ip_values)

    def test_fixed_ip_associate_fails_if_ip_not_in_network(self):
        self.assertRaises(exception.FixedIpNotFoundForNetwork,
                          db.fixed_ip_associate,
                          self.ctxt, None, None)

    def test_fixed_ip_associate_fails_if_ip_in_use(self):
        in_use_address = self.create_fixed_ip(instance_id=self.instance.id)
        self.assertRaises(exception.FixedIpAlreadyInUse,
                          db.fixed_ip_associate,
                          self.ctxt, in_use_address, self.instance.id)

    def test_fixed_ip_associate_succeeds(self):
        free_address = self.create_fixed_ip(network_id=self.network.id)
        db.fixed_ip_associate(self.ctxt, free_address, self.instance.id,
                              network_id=self.network.id)
        associated = db.fixed_ip_get_by_address(self.ctxt, free_address)
        self.assertEqual(associated.instance_id, self.instance.id)

    def test_fixed_ip_associate_succeeds_and_sets_network(self):
        free_address = self.create_fixed_ip()
        db.fixed_ip_associate(self.ctxt, free_address, self.instance.id,
                              network_id=self.network.id)
        associated = db.fixed_ip_get_by_address(self.ctxt, free_address)
        self.assertEqual(associated.instance_id, self.instance.id)
        self.assertEqual(associated.network_id, self.network.id)
class InstanceDestroyConstraints(test.TestCase):
    """Tests for constraint-guarded instance destruction.

    `db.constraint` builds a predicate over instance columns; the destroy
    call only proceeds when the predicate holds, otherwise it raises
    ConstraintNotMet and leaves the instance untouched.
    """
    def test_destroy_with_equal_any_constraint_met(self):
        ctx = context.get_admin_context()
        instance = db.instance_create(ctx, {'task_state': 'deleting'})
        # equal_any: destroy only if task_state matches a listed value.
        constraint = db.constraint(task_state=db.equal_any('deleting'))
        db.instance_destroy(ctx, instance['uuid'], constraint)
        self.assertRaises(exception.InstanceNotFound, db.instance_get_by_uuid,
                          ctx, instance['uuid'])
    def test_destroy_with_equal_any_constraint_not_met(self):
        ctx = context.get_admin_context()
        instance = db.instance_create(ctx, {'vm_state': 'resize'})
        constraint = db.constraint(vm_state=db.equal_any('active', 'error'))
        self.assertRaises(exception.ConstraintNotMet, db.instance_destroy,
                          ctx, instance['uuid'], constraint)
        # The instance must still exist and not be marked deleted.
        instance = db.instance_get_by_uuid(ctx, instance['uuid'])
        self.assertFalse(instance['deleted'])
    def test_destroy_with_not_equal_constraint_met(self):
        ctx = context.get_admin_context()
        instance = db.instance_create(ctx, {'task_state': 'deleting'})
        # not_equal: destroy only if task_state differs from every value.
        constraint = db.constraint(task_state=db.not_equal('error', 'resize'))
        db.instance_destroy(ctx, instance['uuid'], constraint)
        self.assertRaises(exception.InstanceNotFound, db.instance_get_by_uuid,
                          ctx, instance['uuid'])
    def test_destroy_with_not_equal_constraint_not_met(self):
        ctx = context.get_admin_context()
        instance = db.instance_create(ctx, {'vm_state': 'active'})
        constraint = db.constraint(vm_state=db.not_equal('active', 'error'))
        self.assertRaises(exception.ConstraintNotMet, db.instance_destroy,
                          ctx, instance['uuid'], constraint)
        instance = db.instance_get_by_uuid(ctx, instance['uuid'])
        self.assertFalse(instance['deleted'])
def _get_sm_backend_params():
config_params = ("name_label=testsmbackend "
"server=localhost "
"serverpath=/tmp/nfspath")
params = dict(flavor_id=1,
sr_uuid=None,
sr_type='nfs',
config_params=config_params)
return params
def _get_sm_flavor_params():
params = dict(label="gold",
description="automatic backups")
return params
class SMVolumeDBApiTestCase(test.TestCase):
    """Tests for the storage-manager (SM) backend-config and flavor DB API."""

    def setUp(self):
        super(SMVolumeDBApiTestCase, self).setUp()
        self.user_id = 'fake'
        self.project_id = 'fake'
        self.context = context.RequestContext(self.user_id, self.project_id)

    def test_sm_backend_conf_create(self):
        params = _get_sm_backend_params()
        ctxt = context.get_admin_context()
        beconf = db.sm_backend_conf_create(ctxt, params)
        self.assertIsInstance(beconf['id'], int)

    def test_sm_backend_conf_create_raise_duplicate(self):
        params = _get_sm_backend_params()
        ctxt = context.get_admin_context()
        beconf = db.sm_backend_conf_create(ctxt, params)
        self.assertIsInstance(beconf['id'], int)
        self.assertRaises(exception.Duplicate,
                          db.sm_backend_conf_create,
                          ctxt, params)

    def test_sm_backend_conf_update(self):
        ctxt = context.get_admin_context()
        params = _get_sm_backend_params()
        beconf = db.sm_backend_conf_create(ctxt, params)
        beconf = db.sm_backend_conf_update(ctxt, beconf['id'],
                                           dict(sr_uuid="FA15E-1D"))
        self.assertEqual(beconf['sr_uuid'], "FA15E-1D")

    def test_sm_backend_conf_update_raise_notfound(self):
        ctxt = context.get_admin_context()
        self.assertRaises(exception.NotFound,
                          db.sm_backend_conf_update,
                          ctxt, 7, dict(sr_uuid="FA15E-1D"))

    def test_sm_backend_conf_get(self):
        ctxt = context.get_admin_context()
        params = _get_sm_backend_params()
        beconf = db.sm_backend_conf_create(ctxt, params)
        val = db.sm_backend_conf_get(ctxt, beconf['id'])
        self.assertDictMatch(dict(val), dict(beconf))

    def test_sm_backend_conf_get_raise_notfound(self):
        ctxt = context.get_admin_context()
        self.assertRaises(exception.NotFound,
                          db.sm_backend_conf_get,
                          ctxt, 7)

    def test_sm_backend_conf_get_by_sr(self):
        ctxt = context.get_admin_context()
        params = _get_sm_backend_params()
        beconf = db.sm_backend_conf_create(ctxt, params)
        val = db.sm_backend_conf_get_by_sr(ctxt, beconf['sr_uuid'])
        self.assertDictMatch(dict(val), dict(beconf))

    def test_sm_backend_conf_get_by_sr_raise_notfound(self):
        ctxt = context.get_admin_context()
        self.assertRaises(exception.NotFound,
                          db.sm_backend_conf_get_by_sr,
                          ctxt, "FA15E-1D")

    def test_sm_backend_conf_delete(self):
        ctxt = context.get_admin_context()
        params = _get_sm_backend_params()
        beconf = db.sm_backend_conf_create(ctxt, params)
        db.sm_backend_conf_delete(ctxt, beconf['id'])
        self.assertRaises(exception.NotFound,
                          db.sm_backend_conf_get,
                          ctxt, beconf['id'])

    def test_sm_backend_conf_delete_nonexisting(self):
        # Deleting an unknown backend config must not raise.
        ctxt = context.get_admin_context()
        self.assertNotRaises(None, db.sm_backend_conf_delete,
                             ctxt, "FA15E-1D")

    def test_sm_flavor_create(self):
        ctxt = context.get_admin_context()
        params = _get_sm_flavor_params()
        flav = db.sm_flavor_create(ctxt, params)
        self.assertIsInstance(flav['id'], int)

    def test_sm_flavor_create_raise_duplicate(self):
        # BUG FIX: this test was previously named without the 'test_'
        # prefix, so the runner never executed it; the duplicate-create
        # call was also missing the context argument.
        ctxt = context.get_admin_context()
        params = _get_sm_flavor_params()
        db.sm_flavor_create(ctxt, params)
        self.assertRaises(exception.Duplicate,
                          db.sm_flavor_create,
                          ctxt, params)

    def test_sm_flavor_update(self):
        ctxt = context.get_admin_context()
        params = _get_sm_flavor_params()
        flav = db.sm_flavor_create(ctxt, params)
        newparms = dict(description="basic volumes")
        flav = db.sm_flavor_update(ctxt, flav['id'], newparms)
        self.assertEqual(flav['description'], "basic volumes")

    def test_sm_flavor_update_raise_notfound(self):
        ctxt = context.get_admin_context()
        self.assertRaises(exception.NotFound,
                          db.sm_flavor_update,
                          ctxt, 7, dict(description="fakedesc"))

    def test_sm_flavor_delete(self):
        ctxt = context.get_admin_context()
        params = _get_sm_flavor_params()
        flav = db.sm_flavor_create(ctxt, params)
        db.sm_flavor_delete(ctxt, flav['id'])
        # NOTE(review): looks up by "gold" rather than flav['id'] as the
        # backend-conf equivalent does -- confirm against the DB API.
        self.assertRaises(exception.NotFound,
                          db.sm_flavor_get,
                          ctxt, "gold")

    def test_sm_flavor_delete_nonexisting(self):
        # Deleting an unknown flavor must not raise.
        ctxt = context.get_admin_context()
        self.assertNotRaises(None, db.sm_flavor_delete,
                             ctxt, 7)

    def test_sm_flavor_get(self):
        ctxt = context.get_admin_context()
        params = _get_sm_flavor_params()
        flav = db.sm_flavor_create(ctxt, params)
        val = db.sm_flavor_get(ctxt, flav['id'])
        self.assertDictMatch(dict(val), dict(flav))

    def test_sm_flavor_get_raise_notfound(self):
        ctxt = context.get_admin_context()
        self.assertRaises(exception.NotFound,
                          db.sm_flavor_get,
                          ctxt, 7)

    def test_sm_flavor_get_by_label(self):
        ctxt = context.get_admin_context()
        params = _get_sm_flavor_params()
        flav = db.sm_flavor_create(ctxt, params)
        val = db.sm_flavor_get_by_label(ctxt, flav['label'])
        self.assertDictMatch(dict(val), dict(flav))

    def test_sm_flavor_get_by_label_raise_notfound(self):
        # BUG FIX: previously called sm_flavor_get instead of
        # sm_flavor_get_by_label (mirrors test_sm_backend_conf_get_by_sr_*).
        ctxt = context.get_admin_context()
        self.assertRaises(exception.NotFound,
                          db.sm_flavor_get_by_label,
                          ctxt, "fake")
| 42.679963 | 78 | 0.625986 |
acf0b19638f09eb7b4b7d4990cd3b264b9f04ab7 | 1,852 | py | Python | src/azure-cli/azure/cli/command_modules/extension/custom.py | nexxai/azure-cli | 3f24ada49f3323d9310d46ccc1025dc99fc4cf8e | [
"MIT"
] | null | null | null | src/azure-cli/azure/cli/command_modules/extension/custom.py | nexxai/azure-cli | 3f24ada49f3323d9310d46ccc1025dc99fc4cf8e | [
"MIT"
] | null | null | null | src/azure-cli/azure/cli/command_modules/extension/custom.py | nexxai/azure-cli | 3f24ada49f3323d9310d46ccc1025dc99fc4cf8e | [
"MIT"
] | null | null | null | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
from knack.log import get_logger
from azure.cli.core.extension.operations import (
add_extension, remove_extension, list_extensions, show_extension,
list_available_extensions, update_extension, list_versions)
logger = get_logger(__name__)
def add_extension_cmd(cmd, source=None, extension_name=None, index_url=None, yes=None,
                      pip_extra_index_urls=None, pip_proxy=None, system=None, version=None):
    """Command wrapper: forward all options to the core add_extension operation."""
    return add_extension(cli_ctx=cmd.cli_ctx,
                         source=source,
                         extension_name=extension_name,
                         index_url=index_url,
                         yes=yes,
                         pip_extra_index_urls=pip_extra_index_urls,
                         pip_proxy=pip_proxy,
                         system=system,
                         version=version)
def remove_extension_cmd(extension_name):
    """Command wrapper: remove the named CLI extension."""
    return remove_extension(extension_name)
def list_extensions_cmd():
    """Command wrapper: list the currently installed CLI extensions."""
    return list_extensions()
def show_extension_cmd(extension_name):
    """Command wrapper: show details of the named installed extension."""
    return show_extension(extension_name)
def update_extension_cmd(cmd, extension_name, index_url=None, pip_extra_index_urls=None, pip_proxy=None):
    """Command wrapper: update the named CLI extension."""
    return update_extension(cli_ctx=cmd.cli_ctx,
                            extension_name=extension_name,
                            index_url=index_url,
                            pip_extra_index_urls=pip_extra_index_urls,
                            pip_proxy=pip_proxy)
def list_available_extensions_cmd(index_url=None, show_details=False):
    """Command wrapper: list extensions available from the extension index."""
    return list_available_extensions(index_url=index_url,
                                     show_details=show_details)
def list_versions_cmd(extension_name, index_url=None):
    """Command wrapper: list the available versions of an extension."""
    return list_versions(extension_name, index_url=index_url)
| 42.090909 | 112 | 0.697624 |
acf0b2330fdeb264fc7b4f6a7bb2dc1bcc0926e4 | 698 | py | Python | deepdab/util/plotter_multiple3.py | lantunes/deepdab | 0e30f102b9d7c37d3691540496b1649f2704d586 | [
"Apache-2.0"
] | 1 | 2019-04-04T02:26:51.000Z | 2019-04-04T02:26:51.000Z | deepdab/util/plotter_multiple3.py | lantunes/deepdab | 0e30f102b9d7c37d3691540496b1649f2704d586 | [
"Apache-2.0"
] | null | null | null | deepdab/util/plotter_multiple3.py | lantunes/deepdab | 0e30f102b9d7c37d3691540496b1649f2704d586 | [
"Apache-2.0"
] | null | null | null | import matplotlib.pyplot as plt
import pylab
# Column 0 is the iteration index; columns 1-4 are the performance curves
# of four learners, L0 through L3.
data = pylab.loadtxt('../out/plotdata.txt', delimiter=',', usecols=(0, 1, 2, 3, 4))
pylab.plot(data[:, 0], data[:, 1], 'k-', linewidth=0.5, marker='.', markersize=2, label='L0', color='black')
pylab.plot(data[:, 0], data[:, 2], 'k-', linewidth=0.5, marker='.', markersize=2, label='L1', color='blue')
pylab.plot(data[:, 0], data[:, 3], 'k-', linewidth=0.5, marker='.', markersize=2, label='L2', color='red')
# BUG FIX: the fourth series was mislabelled 'L2', producing a duplicate
# legend entry; it is the fourth learner, 'L3'.
pylab.plot(data[:, 0], data[:, 4], 'k-', linewidth=0.5, marker='.', markersize=2, label='L3', color='green')
plt.xlabel("iteration", size=15)
plt.ylabel("performance", size=15)
plt.grid(True, linewidth=0.2)
plt.legend(loc='best')
plt.show()
| 41.058824 | 108 | 0.631805 |
acf0b3dd47e572ae2791e7312f66fbe571985a7b | 1,023 | py | Python | venv/Scripts/pasteurize-script.py | serenasensini/TheRedCode_Docker-per-Django-e-Postgres | 78a2ca1f09ab956a6936d14a5fd99336ff39f472 | [
"BSD-3-Clause"
] | 2 | 2021-12-02T11:41:02.000Z | 2021-12-27T12:01:53.000Z | venv/Scripts/pasteurize-script.py | serenasensini/TheRedCode_Docker-per-Django-e-Postgres | 78a2ca1f09ab956a6936d14a5fd99336ff39f472 | [
"BSD-3-Clause"
] | null | null | null | venv/Scripts/pasteurize-script.py | serenasensini/TheRedCode_Docker-per-Django-e-Postgres | 78a2ca1f09ab956a6936d14a5fd99336ff39f472 | [
"BSD-3-Clause"
] | null | null | null | #!C:\Users\serena.sensini\PycharmProjects\djangogirls\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'future==0.18.2','console_scripts','pasteurize'
import re
import sys
# for compatibility with easy_install; see #2198
# Auto-generated setuptools console-script shim for the 'pasteurize' entry
# point of the 'future' package.
__requires__ = 'future==0.18.2'
# Resolve an entry-point loader, preferring the stdlib importlib.metadata,
# then the importlib_metadata backport, then legacy pkg_resources.
try:
    from importlib.metadata import distribution
except ImportError:
    try:
        from importlib_metadata import distribution
    except ImportError:
        from pkg_resources import load_entry_point
def importlib_load_entry_point(spec, group, name):
    # spec is 'name==version'; only the distribution name is needed here.
    dist_name, _, _ = spec.partition('==')
    matches = (
        entry_point
        for entry_point in distribution(dist_name).entry_points
        if entry_point.group == group and entry_point.name == name
    )
    # Load the first matching entry point (raises StopIteration if none).
    return next(matches).load()
# Use the importlib-based loader unless pkg_resources already provided one.
globals().setdefault('load_entry_point', importlib_load_entry_point)
if __name__ == '__main__':
    # Strip the '-script.py(w)'/'.exe' suffix Windows launchers add to argv[0].
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(load_entry_point('future==0.18.2', 'console_scripts', 'pasteurize')())
| 30.088235 | 83 | 0.709677 |
acf0b4560335cc67a85f80996dd9ae2c70035a60 | 1,633 | py | Python | readthedocs/doc_builder/backends/sphinx_man.py | saltycrane/readthedocs.org | a93a9d5ed8a31b16026c80283ad5dbba0adb62d4 | [
"MIT"
] | null | null | null | readthedocs/doc_builder/backends/sphinx_man.py | saltycrane/readthedocs.org | a93a9d5ed8a31b16026c80283ad5dbba0adb62d4 | [
"MIT"
] | null | null | null | readthedocs/doc_builder/backends/sphinx_man.py | saltycrane/readthedocs.org | a93a9d5ed8a31b16026c80283ad5dbba0adb62d4 | [
"MIT"
] | 1 | 2018-03-28T09:02:57.000Z | 2018-03-28T09:02:57.000Z | from glob import glob
import os
from django.conf import settings
from doc_builder.base import restoring_chdir
from doc_builder.backends.sphinx import Builder as ManpageBuilder
from projects.utils import run
from core.utils import copy_file_to_app_servers
class Builder(ManpageBuilder):
    """Sphinx man-page builder: runs `sphinx-build -b man` for a project
    version and moves the generated page into the media tree."""
    @restoring_chdir
    def build(self):
        """Run sphinx-build (man builder) in the version's conf dir and
        return the command result (from the project `run` helper)."""
        project = self.version.project
        os.chdir(self.version.project.conf_dir(self.version.slug))
        if project.use_virtualenv:
            # Use the sphinx-build installed in the project's virtualenv.
            build_command = '%s -b man -d _build/doctrees . _build/man' % project.venv_bin(
                version=self.version.slug, bin='sphinx-build')
        else:
            build_command = "sphinx-build -b man . _build/man"
        build_results = run(build_command)
        return build_results
    def move(self):
        """Move the first generated *.1 man page to MEDIA_ROOT/man/<slug>/<version>.

        Silently does nothing if sphinx produced no *.1 file.
        """
        project = self.version.project
        outputted_path = os.path.join(project.conf_dir(self.version.slug),
                                      '_build', 'man')
        to_path = os.path.join(settings.MEDIA_ROOT,
                               'man',
                               project.slug,
                               self.version.slug)
        from_globs = glob(os.path.join(outputted_path, "*.1"))
        if from_globs:
            # Only the first man page is published; extras are ignored.
            from_file = from_globs[0]
            to_file = os.path.join(to_path, '%s.1' % project.slug)
            if getattr(settings, "MULTIPLE_APP_SERVERS", None):
                # Fan the file out to all app servers (project helper).
                copy_file_to_app_servers(from_file, to_file)
            else:
                if not os.path.exists(to_path):
                    os.makedirs(to_path)
                # NOTE(review): shell string built from paths; paths with
                # spaces/metacharacters would break this -- confirm inputs.
                run('mv -f %s %s' % (from_file, to_file))
| 37.113636 | 92 | 0.595836 |
acf0b4b8fcba39e715de69ed43af9d36fea1d8a0 | 8,562 | py | Python | lightRaven/agent/cem_seldonian.py | M0gician/lightRaven | edcaed1ffbfab95064fc2719f2e3f79375ce6f04 | [
"MIT"
] | 1 | 2020-12-16T07:41:44.000Z | 2020-12-16T07:41:44.000Z | lightRaven/agent/cem_seldonian.py | M0gician/lightRaven | edcaed1ffbfab95064fc2719f2e3f79375ce6f04 | [
"MIT"
] | null | null | null | lightRaven/agent/cem_seldonian.py | M0gician/lightRaven | edcaed1ffbfab95064fc2719f2e3f79375ce6f04 | [
"MIT"
] | null | null | null | import numpy as np
from typing import List, Callable
from lightRaven.sampling import SamplingBase
from lightRaven.policy import FAPolicy, TabularPolicy, PolicyBase
class CEMSeldonian:
    """
    Seldonian variant of the Cross-Entropy Method (CEM).

    Candidate policies are drawn from a diagonal Gaussian; a candidate whose
    concentration upper bound on any behavioural constraint is positive is
    heavily penalised, so elites are selected from (approximately) safe
    candidates only.
    """
    def __init__(self, epochs: int, pop_size: int, elite_ratio: float,
                 ci_ub: Callable, ref_size: int, g_funcs: List[Callable], c=1, delta=0.05,
                 gamma=0.9, extra_std=2.0, extra_decay_time=10, n_proc=8):
        """
        Parameters
        ----------
        epochs : int
            Total CEM training iterations.
        pop_size : int
            Number of candidate policies sampled per epoch.
        elite_ratio : float
            Fraction in (0, 1) of candidates kept as elites each epoch.
        ci_ub : Callable
            Function computing a concentration upper bound for the safety test.
        ref_size : int
            Safety-test dataset size passed to `ci_ub` (avoids overly
            conservative bounds).
        g_funcs : List[Callable]
            User-defined constraint functions for the safety test.
        c : int
            Scale applied to the interval from `ci_ub` (use 2 for Student's t
            during candidate selection, otherwise 1).
        delta : float
            Significance level of the safety test.
        gamma : float
            MDP discount factor.
        extra_std : float
            Extra exploration std added to the covariance early in training.
        extra_decay_time : float
            Number of epochs over which the extra variance decays to zero.
        n_proc : int
            Worker processes available for parallelisation.
        """
        self.epochs = epochs
        self.pop_size = pop_size
        self.elite_ratio = elite_ratio
        assert 0 < elite_ratio < 1
        self.elite_size = int(self.pop_size * self.elite_ratio)
        self.extra_std = extra_std
        self.extra_decay_time = extra_decay_time
        self.n_proc = n_proc
        self.gamma = gamma

        # Filled in later by load_setup()/load_sampler()/init_params().
        # (The original also assigned self.sampler twice; the duplicate
        # assignment has been removed.)
        self.p_type = None
        self.sampler = None
        self.obs_shape = None
        self.act_shape = None
        self.theta_dim = None
        self.means = None
        self.stds = None
        self.elites = None
        self.best_thetas = np.array([])

        self.ci_ub = ci_ub
        self.delta = delta
        self.ref_size = ref_size
        self.c = c
        self.g_funcs = g_funcs
        # Large penalty subtracted from a candidate's estimated return
        # whenever any constraint bound is positive (possibly unsafe).
        self.penalty = 1e5

    @staticmethod
    def get_best_idx(rewards: np.ndarray) -> np.ndarray:
        """Indices of candidates with strictly positive (un-penalised)
        performance estimates after the safety test.

        Parameters
        ----------
        rewards : np.ndarray
            Vector of estimated performance for each candidate policy.
        """
        return np.where(rewards > 0)

    def load_setup(self, obs_shape: int, act_shape: int, p_type: str) -> None:
        """Record the environment shapes and policy type.

        Parameters
        ----------
        obs_shape : int
            Observation space shape.
        act_shape : int
            Action space shape.
        p_type : str
            'fa' (function approximation) or 'tb' (tabular).

        Raises
        ------
        ValueError
            If `p_type` is not a supported policy type.
        """
        if p_type not in ['fa', 'tb']:
            # BUG FIX: the exception was previously constructed but never
            # raised, so invalid policy types were silently accepted.
            raise ValueError(f'policy type {p_type} is not supported!')
        self.obs_shape, self.act_shape = obs_shape, act_shape
        self.p_type = p_type

    def load_sampler(self, sampler: 'SamplingBase') -> None:
        """Attach the performance estimator used by candidate_eval().

        (Annotation is a string forward reference so the class can be
        defined without importing the sampling module.)
        """
        self.sampler = sampler

    def init_params(self, theta=None) -> None:
        """Initialize the mean/std vectors defining the policy distribution.

        Parameters
        ----------
        theta : np.ndarray, optional
            If given, used as the initial mean vector; otherwise the mean is
            drawn uniformly at random.
        """
        assert all(isinstance(val, int) for val in [self.obs_shape, self.act_shape])
        self.theta_dim = self.obs_shape * self.act_shape
        if theta is not None:
            self.means = theta
        else:
            self.means = np.random.uniform(size=self.theta_dim)
        self.stds = np.ones(self.theta_dim)

    def update_params(self, elites: np.ndarray) -> None:
        """Refit the Gaussian parameters to the elite candidates.

        Parameters
        ----------
        elites : np.ndarray
            Elite policy matrix (one candidate per row).
        """
        self.means = np.mean(elites, axis=0)
        self.stds = np.std(elites, axis=0)

    def candidate_eval(self, theta: np.ndarray) -> float:
        """Estimate a candidate's performance, penalising unsafe candidates.

        Parameters
        ----------
        theta : np.ndarray
            A candidate policy parameter vector generated by CEM.
        """
        assert isinstance(self.sampler, SamplingBase)
        policy = None
        if self.p_type == 'fa':
            policy = FAPolicy(self.obs_shape, self.act_shape, theta)
        elif self.p_type == 'tb':
            policy = TabularPolicy(self.obs_shape, self.act_shape, theta)
        assert isinstance(policy, PolicyBase)
        self.sampler.load_eval_policy(policy)
        rewards = self.sampler.get_est()
        perf_est = rewards.mean()
        # One positive constraint upper bound is enough to penalise.
        for g in self.g_funcs:
            if self.ci_ub(g(rewards), ref_size=self.ref_size, c=self.c, delta=self.delta) > 0:
                perf_est -= self.penalty
                break
        return perf_est

    def get_elite_idx(self, rewards: np.ndarray) -> np.ndarray:
        """Indices of the top `elite_size` candidates (descending order).

        Parameters
        ----------
        rewards : np.ndarray
            Vector of estimated performance for each candidate policy.
        """
        return np.argsort(rewards)[::-1][:self.elite_size]

    def train(self):
        """Run the CEM loop: sample candidates, evaluate, refit to elites."""
        # Check if parameters are set (init_params must have run).
        assert all(isinstance(val, np.ndarray) for val in [self.means, self.stds])
        for epoch in range(self.epochs):
            # Extra exploration variance that decays over the first epochs.
            extra_cov = max(1.0 - epoch / self.extra_decay_time, 0) * self.extra_std**2
            candidates = np.random.multivariate_normal(
                mean=self.means,
                cov=np.diag(np.array(self.stds**2) + extra_cov),
                size=self.pop_size
            )
            g_candidates = list(map(self.candidate_eval, candidates))
            g_candidates = np.array(g_candidates).reshape(-1)
            elite_mask = self.get_elite_idx(g_candidates)
            best_mask = self.get_best_idx(g_candidates)
            self.elites = candidates[elite_mask]
            self.best_thetas = candidates[best_mask]
            self.update_params(self.elites)

    def get_best_candidates(self):
        """Return the candidates that passed the safety test in the last
        epoch, or the current mean vector if none did."""
        if self.best_thetas.shape[0] == 0:
            return self.means
        else:
            return self.best_thetas

    def get_best_rewards(self, n: int):
        """Return the performance estimation of the top-n retained policies.

        Parameters
        ----------
        n : int
            Number of policies to evaluate.
        """
        # NOTE(review): the first two branches return theta vectors rather
        # than evaluated rewards, which contradicts the docstring; behavior
        # preserved here pending confirmation of the intended contract.
        if self.best_thetas.shape[0] == 0:
            return self.means.reshape(1, -1)
        if len(self.best_thetas.shape) == 1:
            return self.best_thetas.reshape(1, -1)
        else:
            return [self.candidate_eval(theta) for theta in self.best_thetas[:n]]
# Add any constraint functions here
def g0(d: np.ndarray) -> np.ndarray:
    """Constraint: violated (positive) when performance falls below 20."""
    lower_bound = 20
    return lower_bound - d
def g1(d: np.ndarray) -> np.ndarray:
    """Constraint: violated (positive) when performance falls below 25."""
    lower_bound = 25
    return lower_bound - d
def g2(d: np.ndarray) -> np.ndarray:
    """Constraint: violated (positive) when performance falls below 30."""
    lower_bound = 30
    return lower_bound - d
| 36.279661 | 116 | 0.612357 |
acf0b51ac093da3d7e72188d495c254fceae972e | 8,451 | py | Python | dotconfig.py | celskeggs/common-tool | cb4a1508e44338552ac7f51a2f27fc92e28786b7 | [
"BSD-2-Clause"
] | null | null | null | dotconfig.py | celskeggs/common-tool | cb4a1508e44338552ac7f51a2f27fc92e28786b7 | [
"BSD-2-Clause"
] | null | null | null | dotconfig.py | celskeggs/common-tool | cb4a1508e44338552ac7f51a2f27fc92e28786b7 | [
"BSD-2-Clause"
] | null | null | null | #!/usr/bin/env python2
'''
Python functionality for manipulating a Kconfig .config file.
The Kconfig build configuration system produces .config files that are
essentially conf format (a.k.a. INI) with no sections. When editing multiple
such files, usually some combination of sed and awk are enough, but
occasionally you need more advanced programmatic editing. This file provides an
object, DotConfig, for reading and then editing .config files programmatically.
The implementation is somewhat more involved than a simple conf parser and dict
because we try to preserve line ordering as much as possible. This makes for
cleaner diffs and allows straightforward in-place edits. The interface also
gives the user control over the timestamp header if they need to preserve or
update this.
To use this from the command-line:
# Set CONFIG_FOO=y
dotconfig.py --set FOO=y .config
# Unset CONFIG_BAR
dotconfig.py --unset BAR .config
# Print the value of CONFIG_BAZ
dotconfig.py --get BAZ .config
And the same programmatically:
import dotconfig
c = dotconfig.DotConfig('.config')
c['FOO'] = True
c['BAR'] = None
print c['BAZ']
c.save()
'''
import argparse, datetime, os, re, sys
# Timestamp format used in .config files.
TIMESTAMP_FORMAT = '%a %b %d %H:%M:%S %Y'
class Line(object):
    '''Base class for every parsed line of a .config file.'''
    pass
class Uninterpreted(Line):
    '''
    A raw .config line that the parser could not classify; it is emitted
    back exactly as it was read.
    '''
    def __init__(self, content):
        self.content = content

    def __repr__(self):
        return self.content
class Timestamp(Line):
    '''
    A '# <date>' header line from a .config file. Normally passed through,
    but DotConfig refreshes it on edits unless preserve_time is set.
    '''
    def __init__(self, timestamp):
        self.timestamp = timestamp

    def __repr__(self):
        formatted = self.timestamp.strftime(TIMESTAMP_FORMAT)
        return '# %s' % formatted
class Option(Line):
    '''
    A single Kconfig option: either an assignment line or an explicit
    "is not set" line. These are the lines users typically manipulate.
    '''
    def __init__(self, key, value):
        self.key = key
        self.value = value

    def __repr__(self):
        value = self.value
        # None and False both render as an explicit unset marker.
        if value is None or (isinstance(value, bool) and not value):
            return '# CONFIG_%s is not set' % self.key
        # bool must be tested before int (bool is an int subclass).
        if isinstance(value, bool):
            assert value
            return 'CONFIG_%s=y' % self.key
        if isinstance(value, int):
            return 'CONFIG_%s=%d' % (self.key, value)
        if isinstance(value, str):
            return 'CONFIG_%s="%s"' % (self.key, value)
        raise NotImplementedError
def parse_value(raw):
    '''
    Parse the right-hand side of a CONFIG_FOO=<raw> assignment.

    Supports the boolean marker 'y', double-quoted strings, and integers;
    anything else raises NotImplementedError.
    '''
    if raw == 'y':
        # Boolean setting that is enabled.
        return True
    if raw.startswith('"') and raw.endswith('"'):
        # String setting; strip the surrounding quotes.
        return raw[1:-1]
    try:
        return int(raw)
    except ValueError:
        # Something else (e.g. hex or tristate) that we don't support yet.
        raise NotImplementedError
# Compiled lazily by parse_line(); see below.
line_regex = None
def parse_line(line):
    '''
    Classify one .config line, returning an Option (set or unset),
    a Timestamp, or an Uninterpreted fallback.
    '''
    # The first time this parser is called we construct this regex on demand
    # because it is slightly expensive.
    global line_regex
    if line_regex is None:
        line_regex = re.compile(r'(?P<option1>CONFIG_(?P<key1>[A-Za-z][A-Za-z0-9_]*)=(?P<value>.*))|' \
                                r'(?P<option2># CONFIG_(?P<key2>[A-Za-z][A-Za-z0-9_]*) is not set)')
    match = line_regex.match(line)
    if match is not None:
        # This line is one of the option forms.
        if match.group('option1') is not None:
            # A set option.
            key = match.group('key1')
            raw_value = match.group('value')
            value = parse_value(raw_value)
            return Option(key, value)
        else:
            # An unset option.
            assert match.group('option2') is not None
            key = match.group('key2')
            return Option(key, None)
    else:
        # Not an option: '# <date>' lines parse as timestamps; line[2:]
        # skips the '# ' prefix before matching TIMESTAMP_FORMAT.
        try:
            timestamp = datetime.datetime.strptime(line[2:], TIMESTAMP_FORMAT)
            # If that succeeded, we have a timestamp line.
            return Timestamp(timestamp)
        except ValueError:
            # Some generic line that we'll handle below.
            pass
    return Uninterpreted(line)
class DotConfig(object):
    '''
    A .config file. This is essentially just a glorified dictionary, but it
    takes some care to preserve the ordering of the input lines where possible.
    Callers can either interact with an object of this type through the dict
    interface, or by directly accessing 'options' and 'lines' if they need an
    explicit line ordering.
    '''
    def __init__(self, path):
        self.path = os.path.abspath(path)
        # We'll track the .config lines that represent actual options in a dict
        # as well as the list of lines to give the user more natural syntax for
        # getting and setting them.
        self.options = {}
        # Also duplicate the tracking of timestamps so we can easily update
        # them in __setitem__ below.
        self.timestamps = []
        # Read and parse the config file.
        self.lines = []
        with open(path, 'r') as f:
            for line in f:
                line = line[:-1] # Strip trailing newline.
                l = parse_line(line)
                self.lines.append(l)
                if isinstance(l, Timestamp):
                    self.timestamps.append(l)
                elif isinstance(l, Option):
                    # Enable dictionary lookup of this option later.
                    self.options[l.key] = l
        self.preserve_time = False
    def save(self):
        '''Write the (possibly edited) lines back to the original path.'''
        with open(self.path, 'w') as f:
            print >>f, str(self)
    def __getitem__(self, key):
        return self.options[key].value
    def __setitem__(self, key, value):
        if key in self.options:
            # Update the option in-place to preserve line ordering.
            self.options[key].value = value
        else:
            opt = Option(key, value)
            self.lines.append(opt)
            self.options[opt.key] = opt
        if not self.preserve_time:
            for t in self.timestamps:
                t.timestamp = datetime.datetime.now()
    def __delitem__(self, key):
        opt = self.options[key]
        self.lines.remove(opt)
        del self.options[key]
    def __iter__(self):
        # BUG FIX: this was previously misspelled '__item__', which is not
        # a protocol method, so iterating a DotConfig fell back to
        # __getitem__ with integer keys and raised KeyError.
        return iter(self.options)
    def __len__(self):
        return len(self.options)
    def __repr__(self):
        return '\n'.join([str(x) for x in self.lines])
def main(argv, out, err):
    '''Command-line entry point.

    Applies any --set/--unset edits to the given .config file, saves it,
    then prints any --get settings to `out`. Returns 0 on success and -1
    on any error (parse failure, bad value, unknown setting).
    '''
    arg_parser = argparse.ArgumentParser(prog=argv[0],
        description='Kconfig configuration file manipulator')
    arg_parser.add_argument('--set', '-s', action='append', default=[],
        help='set a configuration setting')
    arg_parser.add_argument('--unset', '-u', action='append', default=[],
        help='unset a configuration setting')
    arg_parser.add_argument('--get', '-g', action='append', default=[],
        help='print the value of a setting')
    arg_parser.add_argument('--preserve-timestamp', action='store_true',
        help='keep the current timestamp in the .config file')
    arg_parser.add_argument('file', help='path to .config')
    options = arg_parser.parse_args(argv[1:])
    # Parse the existing configuration file.
    try:
        config = DotConfig(options.file)
    except Exception as e:
        print >>err, 'failed to parse %s: %s' % (options.file, e)
        return -1
    if options.preserve_timestamp:
        config.preserve_time = True
    # Apply every --set argument. "KEY=VALUE" assigns a parsed value; a
    # bare "KEY" simply enables the option.
    for assignment in options.set:
        if '=' not in assignment:
            config[assignment] = True
            continue
        key, raw_value = assignment.split('=', 1)
        try:
            value = parse_value(raw_value)
        except NotImplementedError:
            print >>err, 'unsupported value %s in --set argument %s' % \
                (raw_value, assignment)
            return -1
        config[key] = value
    # --unset records the option as "not set".
    for key in options.unset:
        config[key] = None
    config.save()
    # Finally report any requested settings.
    for key in options.get:
        try:
            print >>out, config.options[key]
        except KeyError:
            print >>err, 'setting %s not found' % key
            return -1
    return 0
# Script entry point: process exit status mirrors main()'s return value.
if __name__ == '__main__':
    sys.exit(main(sys.argv, sys.stdout, sys.stderr))
| 30.843066 | 103 | 0.607857 |
acf0b6473936232c053fec910b8a1a7f598c0935 | 15,636 | py | Python | PyPotter.py | nicholaskillin/PyPotter | 2c8cb82847adc081fdced7a253661af95d397d70 | [
"MIT"
] | 28 | 2019-04-10T11:28:01.000Z | 2022-01-09T14:59:36.000Z | PyPotter.py | nicholaskillin/PyPotter | 2c8cb82847adc081fdced7a253661af95d397d70 | [
"MIT"
] | 3 | 2019-04-11T07:53:13.000Z | 2022-03-29T00:15:49.000Z | PyPotter.py | nicholaskillin/PyPotter | 2c8cb82847adc081fdced7a253661af95d397d70 | [
"MIT"
] | 17 | 2019-04-11T23:40:40.000Z | 2022-02-17T20:59:49.000Z | # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import sys
import cv2
from cv2 import *
import numpy as np
import math
import os
from os import listdir
from os.path import isfile, join, isdir
import time
import datetime
import threading
from threading import Thread
from statistics import mean
from CountsPerSec import CountsPerSec
from HassApi import HassApi
# Check for required number of arguments
if (len(sys.argv) < 4):
    print("Incorrect number of arguments. Required Arguments: [video source url] [home assistant URL] [API token]")
    sys.exit(0)
# Parse Required Arguments
videoSource = sys.argv[1]
hassUrl = sys.argv[2]
hassRestToken = sys.argv[3]
# Parse Optional Arguments
# NOTE: the optional flags are compared against the literal string "True",
# so anything else (including "true") leaves the default.
IsRemoveBackground = True
IsShowOutputWindows = True
IsTraining = False
IsDebugFps = False
if (len(sys.argv) >= 5):
    IsRemoveBackground = sys.argv[4] == "True"
if (len(sys.argv) >= 6):
    IsShowOutputWindows = sys.argv[5] == "True"
if (len(sys.argv) >= 7):
    IsTraining = sys.argv[6] == "True"
if (len(sys.argv) >= 8):
    IsDebugFps = sys.argv[7] == "True"
# Initialize Home Assistant Rest API Wrapper
hass = HassApi(hassUrl, hassRestToken)
# Constants
DesiredFps = 42
DefaultFps = 42 # Original constants trained for 42 FPS
MicroSecondsBetweenFrames = (1 / DesiredFps) * 1000000
# Training images are square TrainingResolution x TrainingResolution crops.
TrainingResolution = 50
TrainingNumPixels = TrainingResolution * TrainingResolution
TrainingFolderName = "Training"
# The spell-detection thresholds below are scaled so behavior matches the
# original 42 FPS tuning when DesiredFps is changed.
SpellEndMovement = 0.5 * (DefaultFps / DesiredFps )
MinSpellLength = 15 * (DesiredFps / DefaultFps)
MinSpellDistance = 100
NumDistancesToAverage = int(round( 20 * (DesiredFps / DefaultFps)))
# Booleans to turn on or off output windows
IsShowOriginal = False
IsShowBackgroundRemoved = False
IsShowThreshold = False
IsShowOutput = False
if IsShowOutputWindows:
    IsShowOriginal = True
    IsShowBackgroundRemoved = True
    IsShowThreshold = True
    IsShowOutput = True
# Create Windows
if (IsShowOriginal):
    cv2.namedWindow("Original")
    cv2.moveWindow("Original", 0, 0)
if (IsShowBackgroundRemoved):
    cv2.namedWindow("BackgroundRemoved")
    cv2.moveWindow("BackgroundRemoved", 640, 0)
if (IsShowThreshold):
    cv2.namedWindow("Threshold")
    cv2.moveWindow("Threshold", 0, 480+30)
if (IsShowOutput):
    cv2.namedWindow("Output")
    cv2.moveWindow("Output", 640, 480+30)
# Init Global Variables
# These globals (frame buffers plus IsNew* flags) form the hand-off points
# between the capture loop and the worker threads below.
IsNewFrame = False
nameLookup = {}
LastSpell = "None"
originalCps = CountsPerSec()
noBackgroundCps = CountsPerSec()
thresholdCps = CountsPerSec()
outputCps = CountsPerSec()
# Parameters for cv2.calcOpticalFlowPyrLK (Lucas-Kanade optical flow).
lk_params = dict( winSize = (25,25),
                  maxLevel = 7,
                  criteria = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 0.03))
IsNewFrame = False
frame = None
IsNewFrameNoBackground = False
frame_no_background = None
IsNewFrameThreshold = False
frameThresh = None
findNewWands = True
trackedPoints = None
wandTracks = []
def InitClassificationAlgo():
    """
    Create and train the k-Nearest Neighbor spell classifier.

    Scans every sub-folder of the Training directory (one folder per
    spell), loads each image in grayscale, and trains the global `knn`
    model. Also fills the global `nameLookup` map of label index -> name.
    """
    global knn, nameLookup
    script_dir = os.path.dirname(os.path.realpath(__file__))
    training_dir = join(script_dir, TrainingFolderName)
    label_indexes = []
    image_paths = []
    spell_index = 0
    # Every folder in the training directory contains a set of images
    # corresponding to a single spell. Loop through all folders to train
    # all spells.
    for entry in listdir(training_dir):
        spell_dir = join(training_dir, entry)
        if not isdir(spell_dir):
            continue
        nameLookup[spell_index] = entry
        for image_name in listdir(spell_dir):
            image_path = join(spell_dir, image_name)
            if isfile(image_path):
                label_indexes.append(spell_index)
                image_paths.append(image_path)
        spell_index += 1
    print("Trained Spells: ")
    print(nameLookup)
    # Load every sample in grayscale and flatten each one into a single row.
    gray_samples = [cv2.cvtColor(cv2.imread(p), cv2.COLOR_BGR2GRAY)
                    for p in image_paths]
    training_rows = np.array(gray_samples).reshape(-1, TrainingNumPixels).astype(np.float32)
    # Create KNN and train it on the flattened samples.
    knn = cv2.ml.KNearest_create()
    knn.train(training_rows, cv2.ml.ROW_SAMPLE, np.array(label_indexes))
def ClassifyImage(img):
    """
    Classify input image based on previously trained k-Nearest Neighbor Algorithm.

    img: grayscale crop of the drawn wand path.
    Returns the matched spell name, "Error" for an empty image, or "error"
    for an unrecognized classifier label.
    """
    global knn, nameLookup, args
    # Guard against degenerate crops (e.g. an empty bounding box).
    if (img.size <= 0):
        return "Error"
    size = (TrainingResolution, TrainingResolution)
    test_gray = cv2.resize(img, size, interpolation=cv2.INTER_LINEAR)
    imgArr = np.array(test_gray).astype(np.float32)
    sample = imgArr.reshape(-1, TrainingNumPixels).astype(np.float32)
    ret, result, neighbours, dist = knn.findNearest(sample, k=5)
    print(ret, result, neighbours, dist)
    # BUG FIX: 'ret' is a float label; indexing nameLookup[ret] directly
    # raised KeyError for any label not in the map, which made the "error"
    # branch below unreachable (and crashed the training save path too).
    # Use .get() so unknown labels really do return "error".
    spell_name = nameLookup.get(int(ret))
    if spell_name is not None:
        if IsTraining:
            # Save the resized grayscale sample for future training runs.
            filename = "char" + str(time.time()) + spell_name + ".png"
            cv2.imwrite(join(TrainingFolderName, filename), test_gray)
        print("Match: " + spell_name)
        return spell_name
    else:
        return "error"
def PerformSpell(spell):
    """
    Make the desired Home Assistant REST API call based on the spell.

    Unrecognized spell names are silently ignored.
    """
    # Map each recognized spell to its Home Assistant automation entity.
    automations = {
        "incendio": "automation.wand_incendio",
        "aguamenti": "automation.wand_aguamenti",
        "alohomora": "automation.wand_alohomora",
        "silencio": "automation.wand_silencio",
        "specialis_revelio": "automation.wand_specialis_revelio",
        "revelio": "automation.wand_revelio",
        "tarantallegra": "automation.wand_tarantallegra",
    }
    entity_id = automations.get(spell)
    if entity_id is not None:
        hass.TriggerAutomation(entity_id)
def CheckForPattern(wandTracks, exampleFrame):
    """
    Check the given wandTracks to see if the spell is complete, and if it
    matches a trained spell.

    wandTracks: list of [x, y] wand-tip positions accumulated by ProcessData.
    exampleFrame: frame used only as a size template for drawing the path.
    Returns the (possibly cleared) wandTracks list, or None for empty input.
    """
    # BUG FIX: this previously declared 'global find_new_wands' (a typo),
    # which silently created a brand-new module global. ProcessData() reads
    # 'findNewWands', so the tracker was never told to re-acquire the wand
    # after a spell completed.
    global findNewWands, LastSpell
    if (wandTracks == None or len(wandTracks) == 0):
        return
    thickness = 10
    distances = []
    wand_path_frame = np.zeros_like(exampleFrame)
    prevTrack = wandTracks[0]
    # Draw the wand path and record the distance moved between samples.
    for track in wandTracks:
        x1 = prevTrack[0]
        x2 = track[0]
        y1 = prevTrack[1]
        y2 = track[1]
        # Calculate the distance
        distance = math.sqrt((x1 - x2)**2 + (y1 - y2)**2)
        distances.append(distance)
        cv2.line(wand_path_frame, (x1, y1), (x2, y2), (255, 255, 255), thickness)
        prevTrack = track
    mostRecentDistances = distances[-NumDistancesToAverage:]
    avgMostRecentDistances = mean(mostRecentDistances)
    sumDistances = sum(distances)
    contours, hierarchy = cv2.findContours(wand_path_frame, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    # Determine if wand stopped moving by looking at recent movement
    # (avgMostRecentDistances), and check the length of distances to make
    # sure the spell is reasonably long.
    if (avgMostRecentDistances < SpellEndMovement and len(distances) > MinSpellLength):
        # Make sure wand path is valid and is over the defined minimum distance
        if (len(contours) > 0) and sumDistances > MinSpellDistance:
            cnt = contours[0]
            x, y, w, h = cv2.boundingRect(cnt)
            crop = wand_path_frame[y-10:y+h+10, x-30:x+w+30]
            result = ClassifyImage(crop)
            cv2.putText(wand_path_frame, result, (0, 50), cv2.FONT_HERSHEY_SIMPLEX, 1.0, (255, 255, 255))
            print("Result: ", result, " Most Recent avg: ", avgMostRecentDistances, " Length Distances: ", len(distances), " Sum Distances: ", sumDistances)
            print("")
            PerformSpell(result)
            LastSpell = result
        # The spell attempt is over either way: restart wand acquisition.
        findNewWands = True
        wandTracks.clear()
    if wand_path_frame is not None:
        if (IsShowOutput):
            wandPathFrameWithText = AddIterationsPerSecText(wand_path_frame, outputCps.countsPerSec())
            cv2.putText(wandPathFrameWithText, "Last Spell: " + LastSpell, (10, 400), cv2.FONT_HERSHEY_SIMPLEX, 1.0, (255, 255, 255))
            cv2.imshow("Output", wandPathFrameWithText)
    return wandTracks
def RemoveBackground():
    """
    Thread for removing background.

    Waits for the capture loop to publish a new frame (IsNewFrame), applies
    MOG2 background subtraction, publishes the result via
    frame_no_background / IsNewFrameNoBackground, and loops until the
    thread's `do_run` attribute is set False.
    """
    global frame, frame_no_background, IsNewFrame, IsNewFrameNoBackground
    fgbg = cv2.createBackgroundSubtractorMOG2()
    t = threading.currentThread()
    while getattr(t, "do_run", True):
        if (IsNewFrame):
            # Clear the flag first, then copy, so a frame published while we
            # work is picked up on the next iteration.
            IsNewFrame = False
            frameCopy = frame.copy()
            # Subtract Background
            fgmask = fgbg.apply(frameCopy, learningRate=0.001)
            frame_no_background = cv2.bitwise_and(frameCopy, frameCopy, mask = fgmask)
            IsNewFrameNoBackground = True
            if (IsShowBackgroundRemoved):
                frameNoBackgroundWithCounts = AddIterationsPerSecText(frame_no_background.copy(), noBackgroundCps.countsPerSec())
                cv2.imshow("BackgroundRemoved", frameNoBackgroundWithCounts)
        else:
            # No new frame yet; sleep briefly to avoid a busy-wait.
            time.sleep(0.001)
def CalculateThreshold():
    """
    Thread for calculating frame threshold.

    Consumes either the background-subtracted frame or the raw frame
    (depending on IsRemoveBackground), converts it to grayscale, applies a
    fixed binary threshold, and publishes the result via frameThresh /
    IsNewFrameThreshold. Runs until the thread's `do_run` attribute is
    set False.
    """
    global frame, frame_no_background, frameThresh, IsNewFrame, IsNewFrameNoBackground, IsNewFrameThreshold
    t = threading.currentThread()
    # Pixels brighter than this are kept; tuned for the IR-reflective wand tip.
    thresholdValue = 240
    while getattr(t, "do_run", True):
        if (IsRemoveBackground and IsNewFrameNoBackground) or (not IsRemoveBackground and IsNewFrame):
            if IsRemoveBackground:
                IsNewFrameNoBackground = False
                frame_gray = cv2.cvtColor(frame_no_background, cv2.COLOR_BGR2GRAY)
            if not IsRemoveBackground:
                IsNewFrame = False
                frame_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
            ret, frameThresh = cv2.threshold(frame_gray, thresholdValue, 255, cv2.THRESH_BINARY);
            IsNewFrameThreshold = True
            if (IsShowThreshold):
                frameThreshWithCounts = AddIterationsPerSecText(frameThresh.copy(), thresholdCps.countsPerSec())
                cv2.imshow("Threshold", frameThreshWithCounts)
        else:
            # No new input yet; sleep briefly to avoid a busy-wait.
            time.sleep(0.001)
def ProcessData():
    """
    Thread for processing final frame.

    Consumes thresholded frames (frameThresh / IsNewFrameThreshold). When
    findNewWands is set it searches for candidate wand tips with
    goodFeaturesToTrack; otherwise it follows the tracked points between
    frames with Lucas-Kanade optical flow, appending positions to
    wandTracks and handing them to CheckForPattern. Runs until the
    thread's `do_run` attribute is set False.
    """
    global frameThresh, IsNewFrameThreshold, findNewWands, wandTracks, outputFrameCount
    oldFrameThresh = None
    trackedPoints = None
    t = threading.currentThread()
    while getattr(t, "do_run", True):
        if (IsNewFrameThreshold):
            if (IsDebugFps):
                outputFrameCount = outputFrameCount + 1
            IsNewFrameThreshold = False
            localFrameThresh = frameThresh.copy()
            if (findNewWands):
                # Identify Potential Wand Tips using GoodFeaturesToTrack
                trackedPoints = cv2.goodFeaturesToTrack(localFrameThresh, 5, .01, 30)
                if trackedPoints is not None:
                    findNewWands = False
            else:
                # calculate optical flow
                nextPoints, statusArray, err = cv2.calcOpticalFlowPyrLK(oldFrameThresh, localFrameThresh, trackedPoints, None, **lk_params)
                # Select good points
                good_new = nextPoints[statusArray==1]
                good_old = trackedPoints[statusArray==1]
                if (len(good_new) > 0):
                    # draw the tracks
                    for i,(new,old) in enumerate(zip(good_new,good_old)):
                        a,b = new.ravel()
                        c,d = old.ravel()
                        wandTracks.append([a, b])
                    # Update which points are tracked
                    trackedPoints = good_new.copy().reshape(-1,1,2)
                    wandTracks = CheckForPattern(wandTracks, localFrameThresh)
                else:
                    # No Points were tracked, check for a pattern and start searching for wands again
                    #wandTracks = CheckForPattern(wandTracks, localFrameThresh)
                    wandTracks = []
                    findNewWands = True
            # Store Previous Threshold Frame
            oldFrameThresh = localFrameThresh
        else:
            # No new frame yet; sleep briefly to avoid a busy-wait.
            time.sleep(0.001)
def AddIterationsPerSecText(frame, iterations_per_sec):
    """
    Overlay the iteration rate in the lower-left corner of `frame`.

    The frame is modified in place and also returned for convenience.
    """
    label = "{:.0f} iterations/sec".format(iterations_per_sec)
    origin = (10, 450)
    white = (255, 255, 255)
    cv2.putText(frame, label, origin, cv2.FONT_HERSHEY_SIMPLEX, 1.0, white)
    return frame
timeLastPrintedFps = datetime.datetime.now()
inputFrameCount = 0
outputFrameCount = 0
# Initialize and train the spell classification algorithm
InitClassificationAlgo()
# Start thread to remove frame background
if IsRemoveBackground:
    RemoveBackgroundThread = Thread(target=RemoveBackground)
    RemoveBackgroundThread.do_run = True
    RemoveBackgroundThread.daemon = True
    RemoveBackgroundThread.start()
# Start thread to calculate threshold
CalculateThresholdThread = Thread(target=CalculateThreshold)
CalculateThresholdThread.do_run = True
CalculateThresholdThread.daemon = True
CalculateThresholdThread.start()
# Start thread to process final frame
ProcessDataThread = Thread(target=ProcessData)
ProcessDataThread.do_run = True
ProcessDataThread.daemon = True
ProcessDataThread.start()
# Set OpenCV video capture source
videoCapture = cv2.VideoCapture(videoSource)
# Main Loop: capture frames and publish them to the worker threads.
while True:
    # Get most recent frame
    ret, localFrame = videoCapture.read()
    if (ret):
        frame = localFrame.copy()
        # If successful, flip the frame and set the Flag for the next process to take over
        cv2.flip(frame, 1, frame) # Flipping the frame is done so the spells look like what we expect, instead of the mirror image
        IsNewFrame = True
        if (IsDebugFps):
            inputFrameCount = inputFrameCount + 1
            # Print FPS Debug info every second
            if ((datetime.datetime.now() - timeLastPrintedFps).seconds >= 1 ):
                timeLastPrintedFps = datetime.datetime.now()
                print("FPS: %d/%d" %(inputFrameCount, outputFrameCount))
                inputFrameCount = 0
                outputFrameCount = 0
        # Update Windows
        if (IsShowOriginal):
            frameWithCounts = AddIterationsPerSecText(frame.copy(), originalCps.countsPerSec())
            cv2.imshow("Original", frameWithCounts)
    elif not ret:
        # If an error occurred, try initializing the video capture again
        videoCapture = cv2.VideoCapture(videoSource)
    # Check for ESC key, if pressed shut everything down.
    # BUG FIX: this previously used "is 27"; identity comparison against an
    # int literal is implementation-defined (and a SyntaxWarning on
    # Python 3.8+), so compare by value instead.
    if (cv2.waitKey(1) == 27):
        break
# Shutdown PyPotter
if IsRemoveBackground:
    RemoveBackgroundThread.do_run = False
    RemoveBackgroundThread.join()
CalculateThresholdThread.do_run = False
ProcessDataThread.do_run = False
CalculateThresholdThread.join()
ProcessDataThread.join()
cv2.destroyAllWindows()
acf0b6d69a65a854fa2b93d22b22cacef8e558bd | 398 | py | Python | MathematicalChallenges/14_ISBN/Task14.py | kamil2789/TasksCollection | ed4f84b431b42a4649a7ac042c07fe7e27a71c40 | [
"MIT"
] | 1 | 2021-07-12T17:14:53.000Z | 2021-07-12T17:14:53.000Z | MathematicalChallenges/14_ISBN/Task14.py | kamil2789/TasksCollection | ed4f84b431b42a4649a7ac042c07fe7e27a71c40 | [
"MIT"
] | null | null | null | MathematicalChallenges/14_ISBN/Task14.py | kamil2789/TasksCollection | ed4f84b431b42a4649a7ac042c07fe7e27a71c40 | [
"MIT"
] | null | null | null | def is_isbn_valid(isbn):
clear_isbn = [x for x in isbn if x != '-']
print(clear_isbn)
if len(clear_isbn) != 10:
return False
sum = 0
for item in range(10, 1, -1):
sum += int(clear_isbn[10 - item]) * item
control_number = int(clear_isbn[-1])
return (11 - sum % 11) % 11 == control_number
# Interactive entry point: read an ISBN from stdin and report its validity.
isbn = input("Check 10-ISBN number:")
print(is_isbn_valid(isbn))
| 26.533333 | 49 | 0.603015 |
acf0b70034d2d612706886b660e05c5920aca693 | 16,384 | py | Python | zoom/_assets/standard_apps/admin/index.py | zodman/ZoomFoundry | 87a69f519a2ab6b63aeec0a564ce41259e64f88d | [
"MIT"
] | null | null | null | zoom/_assets/standard_apps/admin/index.py | zodman/ZoomFoundry | 87a69f519a2ab6b63aeec0a564ce41259e64f88d | [
"MIT"
] | null | null | null | zoom/_assets/standard_apps/admin/index.py | zodman/ZoomFoundry | 87a69f519a2ab6b63aeec0a564ce41259e64f88d | [
"MIT"
] | null | null | null | """
users index
"""
import datetime
import json
import os
import sys
import platform
import zoom
import zoom.apps
import zoom.html as h
from zoom.helpers import link_to, link_to_page
from zoom.page import page
from zoom.tools import load_content, today, how_long_ago
from zoom.users import link_to_user
import views
import users
import groups
def log_data(db, status, n, limit, q):
    """Retrieve a page of log rows for the current host as a browse table.

    db: site database; status: iterable of status codes to include;
    n: zero-based page number; limit: rows per page; q: substring filter
    applied (post-query) against each row's repr.
    """
    host = zoom.system.request.host
    statuses = tuple(status)
    offset = int(n) * int(limit)
    # limit/offset are interpolated as sanitized ints; statuses and host
    # are passed as query parameters below.
    # NOTE(review): the 'statuses' kwarg passed to .format() is unused by
    # the template — harmless, but confusing.
    cmd = """
        select
            id,
            status,
            user_id,
            address,
            app,
            path,
            timestamp,
            elapsed
        from log
        where status in %s and server = %s
        order by id desc
        limit {limit}
        offset {offset}
        """.format(
        limit=int(limit),
        offset=offset,
        statuses=statuses
    )
    data = db(cmd, statuses, host)
    # Decorate each row: link the id, resolve the user, link the path, and
    # add a humanized "when" column while keeping the raw timestamp.
    data = [
        [
            link_to(
                str(item[0]),
                '/admin/entry/' + str(item[0])
            ),
            item[1],
            zoom.helpers.who(item[2]),
            item[3],
            item[4],
            zoom.link_to(item[5]),
            how_long_ago(item[6]),
        ] + list(item[6:])
        for item in data
        if q in repr(item)
    ]
    labels = (
        'id', 'status', 'user', 'address', 'app',
        'path', 'when', 'timestamp', 'elapsed'
    )
    return zoom.browse(data, labels=labels)
def activity_panel(db):
    """Return a browse table of the 15 most recent completed requests,
    excluding internal paths (those containing '/_')."""
    host = zoom.system.request.host
    data = db("""
        select
            log.id,
            users.username,
            log.address,
            log.path,
            log.timestamp,
            log.elapsed
        from log left join users on log.user_id = users.id
        where server = %s and path not like "%%\\/\\_%%"
        and log.status = 'C'
        order by timestamp desc
        limit 15
        """, host)
    rows = []
    for rec in data:
        row = [
            link_to(str(rec[0]), '/admin/entry/' + str(rec[0])),
            link_to_user(rec[1]),
            rec[2],
            zoom.link_to(rec[3]),
            how_long_ago(rec[4]),
            rec[4],
            rec[5],
        ]
        rows.append(row)
    labels = 'id', 'user', 'address', 'path', 'when', 'timestamp', 'elapsed'
    return zoom.browse(rows, labels=labels, title=link_to_page('Requests'))
def error_panel(db):
    """Return a browse table of today's 10 most recent error log entries."""
    host = zoom.system.request.host
    data = db("""
        select
            log.id,
            username,
            path,
            timestamp
        from log left join users on log.user_id = users.id
        where log.status in ("E") and timestamp>=%s
        and server = %s
        order by log.id desc
        limit 10
        """, today(), host)
    rows = []
    for rec in data:
        row = [
            link_to(str(rec[0]), '/admin/entry/' + str(rec[0])),
            link_to_user(rec[1]),
            rec[2],
            how_long_ago(rec[3]),
        ]
        rows.append(row)
    labels = 'id', 'user', 'path', 'when'
    return zoom.browse(rows, labels=labels, title=link_to_page('Errors'))
def warning_panel(db):
    """Return a browse table of today's 10 most recent warning log entries.

    NOTE(review): unlike error_panel this uses an inner join, so warnings
    with no matching user row are omitted — confirm that is intentional.
    """
    host = zoom.system.request.host
    data = db("""
        select
            log.id,
            username,
            path,
            timestamp
        from log inner join users on log.user_id = users.id
        where log.status in ("W") and timestamp>=%s
        and server = %s
        order by log.id desc
        limit 10
        """, today(), host)
    rows = []
    for rec in data:
        row = [
            link_to(str(rec[0]), '/admin/entry/' + str(rec[0])),
            link_to_user(rec[1]),
            rec[2],
            how_long_ago(rec[3]),
        ]
        rows.append(row)
    labels = 'id', 'user', 'path', 'when'
    return zoom.browse(rows, labels=labels, title=link_to_page('Warnings'))
def users_panel(db):
    """Return a browse table of the 10 most recently active users over the
    last 14 days, with last-seen time and request counts."""
    host = zoom.system.request.host
    data = db("""
        select
            users.username,
            max(log.timestamp) as timestamp,
            count(*) as requests
        from log, users
        where log.user_id = users.id
        and timestamp >= %s
        and server = %s
        and path not like "%%\\/\\_%%"
        group by users.username
        order by timestamp desc
        limit 10
        """, today() - datetime.timedelta(days=14), host)
    rows = []
    for rec in data:
        row = [
            link_to_user(rec[0]),
            how_long_ago(rec[1]),
            rec[2],
        ]
        rows.append(row)
    labels = 'user', 'last seen', 'requests'
    return zoom.browse(rows, labels=labels, title=link_to_page('Users'))
def callback(method, url=None, timeout=5000):
    """Wrap `method`'s output in a div that periodically refreshes itself.

    Returns a Component whose HTML holds the initial content and whose
    JavaScript polls `url` (default: the app route named after the method)
    every `timeout` milliseconds, replacing the div's contents.
    """
    name = method.__name__
    target = url if url else '/<dz:app_name>/' + name
    params = {'path': target, 'method_name': name, 'timeout': timeout}
    js = """
    jQuery(function($){
      setInterval(function(){
        $.get('%(path)s', function( content ){
          if (content) {
            $('#%(method_name)s').html( content );
          }
        });
      }, %(timeout)s);
    });
    """ % params
    initial_value = method().content
    content = '<div id="%(method_name)s">%(initial_value)s</div>' % {
        'method_name': name,
        'initial_value': initial_value,
    }
    return zoom.Component(content, js=js)
class MyView(zoom.View):
    """Admin views: dashboard, request/error/warning logs, audit trail,
    configuration and environment pages."""
    def index(self, q=''):
        """Render the admin dashboard; `q` searches users and groups."""
        # The dashboard body refreshes itself periodically via callback().
        content = callback(self._index)
        if q:
            request = zoom.system.request
            users_collection = users.get_users_collection(request)
            user_records = users_collection.search(q)
            groups_collection = groups.get_groups_collection(request)
            group_records = groups_collection.search(q)
            if user_records or group_records:
                # Replace the dashboard with the search results.
                content = zoom.Component()
                if group_records:
                    footer = '%d groups found' % len(group_records)
                    content += zoom.browse(
                        group_records,
                        columns=groups_collection.columns,
                        labels=groups_collection.labels,
                        title='Groups',
                        footer=footer,
                    )
                if user_records:
                    footer = '%d users found' % len(user_records)
                    content += zoom.browse(
                        user_records,
                        columns=users_collection.columns,
                        labels=users_collection.labels,
                        title='Users',
                        footer=footer,
                    )
            else:
                zoom.alerts.warning('no records found')
        return page(
            content,
            title='Overview',
            search=q
        )
    def _index(self):
        """Build the dashboard panels (metrics, activity, users, errors,
        warnings); served as a partial for the auto-refresh callback."""
        # Disable logging so the dashboard queries don't pollute the very
        # log tables they display.
        self.model.site.logging = False
        db = self.model.site.db
        content = zoom.Component(
            views.index_metrics_view(db),
            views.IndexPageLayoutView(
                feed1=activity_panel(db),
                feed2=users_panel(db),
                feed3=error_panel(db),
                feed4=warning_panel(db),
            ),
        )
        return zoom.partial(content)
    def clear(self):
        """Clear the search"""
        return zoom.home()
    def log(self):
        """view system log"""
        # _system_log() disables site logging; restore the original
        # setting once the page has been rendered.
        save_logging = self.model.site.logging
        try:
            content = callback(self._system_log)
        finally:
            self.model.site.logging = save_logging
        return page(content, title='System Log')
    def _system_log(self):
        """Return the 50 most recent log rows as a browse table."""
        self.model.site.logging = False
        db = self.model.site.db
        data = db(
            """
            select
                id, app, path, status, address, elapsed, message
            from log
            order by id desc limit 50
            """
        )
        return zoom.browse(data)
    def audit(self):
        """view audit log"""
        def fmt(rec):
            # Replace the user-id and timestamp columns with readable values,
            # keeping the raw timestamp as the final column.
            user = (zoom.helpers.who(rec[2]),)
            when = (zoom.helpers.when(rec[-1]),)
            return rec[0:2] + user + rec[3:-1] + when + rec[-1:]
        db = self.model.site.db
        data = list(map(fmt, db("""
            select
                *
            from audit_log
            order by id desc
            limit 100"""
        )))
        labels = 'ID', 'App', 'By Whom', 'Activity', 'Subject 1', 'Subject 2', 'When', 'Timestamp'
        return page(zoom.browse(data, labels=labels), title='Activity')
    def requests(self, show_all=False):
        """List the 100 most recent request log entries; internal paths
        (containing '/_') are hidden unless show_all is truthy."""
        def fmt(rec):
            # Link the log id and path, and resolve the user id to a name.
            entry = (link_to(str(rec[0]), '/admin/entry/' + str(rec[0])),)
            user = (zoom.helpers.who(rec[4]),)
            link = (zoom.link_to(rec[2]),)
            return entry + (rec[1],) + link + rec[3:4] + user + rec[5:]
        path_filter = '' if show_all else 'and path not like "%%\\/\\_%%"'
        db = self.model.site.db
        data = db("""
            select
                id, app, path, status, user_id, address, login, timestamp, elapsed
            from log
            where status in ('C', 'I', 'W')
            and server = %s
            {}
            order by id desc
            limit 100""".format(path_filter), zoom.system.request.host)
        labels = 'id', 'app', 'path', 'status', 'user', 'address', 'login', 'timestamp', 'elapsed'
        data = list(map(fmt, data))
        actions = () if show_all else ('Show All',)
        return page(zoom.browse(data, labels=labels), title='Requests', actions=actions)
    def performance(self, n=0, limit=50, q=''):
        """Paged view of performance ('P') log entries."""
        db = self.model.site.db
        return page(log_data(db, ['P'], n, limit, q), title='Performance', search=q, clear='/admin/performance')
    def activity(self, n=0, limit=50, q=''):
        """Paged view of activity ('A') log entries."""
        db = self.model.site.db
        return page(log_data(db, ['A'], n, limit, q), title='Activity', search=q, clear='/admin/activity')
    def errors(self, n=0, limit=50, q=''):
        """Paged view of error ('E') log entries."""
        db = self.model.site.db
        return page(log_data(db, ['E'], n, limit, q), title='Errors', search=q, clear='/admin/errors')
    def warnings(self, n=0, limit=50, q=''):
        """Paged view of warning ('W') log entries."""
        db = self.model.site.db
        return page(log_data(db, ['W'], n, limit, q), title='Warnings', search=q, clear='/admin/warnings')
    def entry(self, key):
        """Show a single log entry, one field per row, hiding private
        (underscore-prefixed) fields."""
        def fmt(item):
            name, value = item
            return name, zoom.html.pre(value)
        entries = zoom.table_of('log')
        entry = list(entries.first(id=key).items())
        visible = lambda a: not a[0].startswith('_')
        content = zoom.html.table(
            map(fmt, filter(visible, entry))
        )
        css = """
        .content table {
            width: 80%;
            vertical-align: top;
        }
        .content table td:nth-child(1) {
            width: 30%;
        }
        .content table td {
            padding: 5px;
            line-height: 20px;
        }
        .content table pre {
            padding: 0px;
            background: 0;
            border: none;
            line-height: 20px;
            margin: 0;
        }
        """
        return page(content, title='Log Entry', css=css)
    def configuration(self):
        """Return the configuration page"""
        get = zoom.system.site.config.get
        site = zoom.system.site
        request = zoom.system.request
        app = zoom.system.request.app
        system_apps = get('apps', 'system', ','.join(zoom.apps.DEFAULT_SYSTEM_APPS))
        main_apps = get('apps', 'main', ','.join(zoom.apps.DEFAULT_MAIN_APPS))
        items = zoom.packages.get_registered_packages()
        # Render each registered package as (name, '<br>'-joined resources),
        # listing resource groups in requires/styles/libs order.
        packages = (
            (
                key,
                '<br>'.join(
                    '{resources}'.format(
                        resources='<br>'.join(resources)
                    ) for resource_type, resources
                    in sorted(parts.items(), key=lambda a: ['requires', 'styles', 'libs'].index(a[0]))
                )
            ) for key, parts in sorted(items.items())
        )
        # NOTE(review): the page title below is 'Environment' even though
        # this is the configuration page — confirm before changing.
        return page(
            zoom.Component(
                h.h2('Site'),
                zoom.html.table([(k, getattr(site, k)) for k in
                    (
                        'name',
                        'path',
                        'owner_name',
                        'owner_email',
                        'owner_url',
                        'admin_email',
                        'csrf_validation',
                    )
                ]),
                h.h2('Users'),
                zoom.html.table([(k, getattr(site, k)) for k in
                    (
                        'guest',
                        'administrators_group',
                        'developers_group',
                    )
                ]),
                h.h2('Apps'),
                zoom.html.table([(k, getattr(site, k)) for k in
                    (
                        'index_app_name',
                        'home_app_name',
                        'login_app_name',
                        'auth_app_name',
                        'locate_app_name',
                    )
                ] + [
                    ('app.path', app.path),
                    ('apps_paths', '<br>'.join(site.apps_paths)),
                    ('main_apps', main_apps),
                    ('system_apps', system_apps),
                ]),
                h.h2('Theme'),
                zoom.html.table([
                    ('name', site.theme),
                    ('path', site.theme_path),
                    ('comments', site.theme_comments),
                ]),
                h.h2('Sessions'),
                zoom.html.table([(k, getattr(site, k)) for k in
                    ('secure_cookies',)
                ]),
                h.h2('Monitoring'),
                zoom.html.table([
                    ('logging', site.logging),
                    ('profiling', site.profiling),
                    ('app_database', site.monitor_app_database),
                    ('system_database', site.monitor_system_database),
                ]),
                h.h2('Errors'),
                zoom.html.table([
                    ('users', get('errors', 'users', False)),
                ]),
                h.h2('Packages'),
                zoom.html.table(
                    packages,
                ),
                css = """
                .content table { width: 100%; }
                .content table td { vertical-align: top; width: 70%; }
                .content table td:first-child { width: 25%; }
                """
            ),
            title='Environment'
        )
    def environment(self):
        """Show Zoom, Python, OS, platform and environment-variable details."""
        # NOTE(review): 'Archtitecture' below is a user-visible label typo;
        # left as-is here since this change is documentation-only.
        return page(
            zoom.Component(
                h.h2('Zoom'),
                zoom.html.table([
                    ('Version', zoom.__version__ + ' Community Edition'),
                    ('Installed Path', zoom.tools.zoompath()),
                ]),
                h.h2('Python'),
                zoom.html.table([
                    ('sys.version', sys.version),
                    ('sys.path', '<br>'.join(sys.path)),
                ]),
                h.h2('Operating System'),
                zoom.html.table([
                    ('Name', os.name),
                    ('PATH', '<br>'.join(os.environ.get('PATH').split(':')))
                ]),
                h.h2('Platform'),
                zoom.html.table([
                    ('Node', platform.node()),
                    ('System', platform.system()),
                    ('Machine', platform.machine()),
                    ('Archtitecture', ' '.join(platform.architecture()))
                ]),
                h.h2('Variables'),
                zoom.html.table(
                    list(os.environ.items())
                ),
                css = """
                .content table { width: 100%; }
                .content table td { vertical-align: top; width: 70%; }
                .content table td:first-child { width: 25%; }
                """
            ),
            title='Environment'
        )
    def about(self, *a):
        """Render the About page with the current Zoom version."""
        return page(load_content('about.md', version=zoom.__version__ + ' Community Edition'))
def main(route, request):
    """main program

    Dispatch the request to MyView, passing the remaining route segments
    as positional arguments and the request data as keyword arguments.
    """
    return MyView(request)(*request.route[1:], **request.data)
| 29.952468 | 112 | 0.466614 |
acf0b71b12650a52395413a21da94971603899ec | 2,291 | py | Python | api/tests/test_permissions.py | connectthefuture/calc | 7f0dc01d9265f26a36c2b9b5ee779fe876e4a494 | [
"CC0-1.0"
] | null | null | null | api/tests/test_permissions.py | connectthefuture/calc | 7f0dc01d9265f26a36c2b9b5ee779fe876e4a494 | [
"CC0-1.0"
] | 1 | 2021-06-10T23:13:04.000Z | 2021-06-10T23:13:04.000Z | api/tests/test_permissions.py | connectthefuture/calc | 7f0dc01d9265f26a36c2b9b5ee779fe876e4a494 | [
"CC0-1.0"
] | null | null | null | from unittest import mock
from django.test import SimpleTestCase, override_settings
from ..permissions import WhiteListPermission
@override_settings(REST_FRAMEWORK={'WHITELIST': ['1.1.2.2']})
class WhiteListPermissionTests(SimpleTestCase):
    """Tests for WhiteListPermission's IP whitelist checks.

    The class-level override whitelists 1.1.2.2; individual tests probe
    the X-Forwarded-For and REMOTE_ADDR handling against it.
    """
    @override_settings(REST_FRAMEWORK={'WHITELIST': None})
    def test_it_returns_true_when_no_whitelist_setting(self):
        """With no whitelist configured, all requests are permitted."""
        w = WhiteListPermission()
        self.assertTrue(w.has_permission(None, None))
    def test_it_returns_true_when_forwarded_for_ip_is_whitelisted(self):
        """Any whitelisted IP in the X-Forwarded-For chain grants access,
        including entries with surrounding whitespace."""
        w = WhiteListPermission()
        req = mock.MagicMock(
            META={'HTTP_X_FORWARDED_FOR': '5.5.5.5, 1.1.2.2, 2.2.3.3'}
        )
        self.assertTrue(w.has_permission(req, None))
        req = mock.MagicMock(
            META={'HTTP_X_FORWARDED_FOR': ' 1.1.2.2 '}
        )
        self.assertTrue(w.has_permission(req, None))
    def test_it_returns_false_when_forwarded_for_ip_is_not_whitelisted(self):
        w = WhiteListPermission()
        req = mock.MagicMock(
            META={'HTTP_X_FORWARDED_FOR': '5.5.5.5, 2.2.3.3'}
        )
        self.assertFalse(w.has_permission(req, None))
    def test_it_returns_true_when_remote_addr_is_whitelisted(self):
        w = WhiteListPermission()
        req = mock.MagicMock(
            META={'REMOTE_ADDR': '1.1.2.2'}
        )
        self.assertTrue(w.has_permission(req, None))
    def test_it_returns_false_when_remote_addr_is_not_whitelisted(self):
        w = WhiteListPermission()
        req = mock.MagicMock(
            META={'REMOTE_ADDR': '5.6.7.8'}
        )
        self.assertFalse(w.has_permission(req, None))
    def test_it_returns_false_when_neither_header_has_whitelisted_ip(self):
        w = WhiteListPermission()
        req = mock.MagicMock(
            META={
                'HTTP_X_FORWARDED_FOR': '5.5.5.5, 2.2.3.3',
                'REMOTE_ADDR': '5.6.7.8'
            }
        )
        self.assertFalse(w.has_permission(req, None))
    def test_it_prefers_forwarded_for_to_remote_addr(self):
        """When X-Forwarded-For is present it wins: a whitelisted
        REMOTE_ADDR does not grant access."""
        w = WhiteListPermission()
        req = mock.MagicMock(
            META={
                'HTTP_X_FORWARDED_FOR': '5.5.5.5, 2.2.3.3',
                'REMOTE_ADDR': '1.1.2.2'
            }
        )
        self.assertFalse(w.has_permission(req, None))
acf0b7a9a7ce60f648e03daea78f60831b490675 | 7,021 | py | Python | config/settings/production.py | txl518/redditclone | dcf150ef0f9487ed736363a9a37469b22e9f9cc4 | [
"BSD-3-Clause"
] | null | null | null | config/settings/production.py | txl518/redditclone | dcf150ef0f9487ed736363a9a37469b22e9f9cc4 | [
"BSD-3-Clause"
] | null | null | null | config/settings/production.py | txl518/redditclone | dcf150ef0f9487ed736363a9a37469b22e9f9cc4 | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
'''
Production Configurations
- Use djangosecure
- Use Amazon's S3 for storing static files and uploaded media
- Use mailgun to send emails
- Use Redis on Heroku
'''
from __future__ import absolute_import, unicode_literals
from boto.s3.connection import OrdinaryCallingFormat
from django.utils import six
from .common import * # noqa
# SECRET CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#secret-key
# Raises ImproperlyConfigured exception if DJANGO_SECRET_KEY not in os.environ
SECRET_KEY = env("DJANGO_SECRET_KEY")

# This ensures that Django will be able to detect a secure connection
# properly on Heroku (the proxy terminates TLS and forwards this header).
SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')

# django-secure
# ------------------------------------------------------------------------------
INSTALLED_APPS += ("djangosecure", )

SECURITY_MIDDLEWARE = (
    'djangosecure.middleware.SecurityMiddleware',
)

# Make sure djangosecure.middleware.SecurityMiddleware is listed first
MIDDLEWARE_CLASSES = SECURITY_MIDDLEWARE + MIDDLEWARE_CLASSES

# set this to 60 seconds and then to 518400 when you can prove it works
SECURE_HSTS_SECONDS = 60
SECURE_HSTS_INCLUDE_SUBDOMAINS = env.bool(
    "DJANGO_SECURE_HSTS_INCLUDE_SUBDOMAINS", default=True)
SECURE_FRAME_DENY = env.bool("DJANGO_SECURE_FRAME_DENY", default=True)
SECURE_CONTENT_TYPE_NOSNIFF = env.bool(
    "DJANGO_SECURE_CONTENT_TYPE_NOSNIFF", default=True)
SECURE_BROWSER_XSS_FILTER = True
# NOTE(review): SESSION_COOKIE_SECURE = False lets the session cookie travel
# over plain HTTP; with SECURE_SSL_REDIRECT enabled below, True is the usual
# production value — confirm this is intentional.
SESSION_COOKIE_SECURE = False
SESSION_COOKIE_HTTPONLY = True
SECURE_SSL_REDIRECT = env.bool("DJANGO_SECURE_SSL_REDIRECT", default=True)

# SITE CONFIGURATION
# ------------------------------------------------------------------------------
# Hosts/domain names that are valid for this site
# See https://docs.djangoproject.com/en/1.6/ref/settings/#allowed-hosts
ALLOWED_HOSTS = env.list('DJANGO_ALLOWED_HOSTS', default=['myreddit.com'])
# END SITE CONFIGURATION

INSTALLED_APPS += ("gunicorn", )

# STORAGE CONFIGURATION
# ------------------------------------------------------------------------------
# Uploaded Media Files
# ------------------------
# See: http://django-storages.readthedocs.org/en/latest/index.html
INSTALLED_APPS += (
    'storages',
)
DEFAULT_FILE_STORAGE = 'storages.backends.s3boto.S3BotoStorage'

AWS_ACCESS_KEY_ID = env('DJANGO_AWS_ACCESS_KEY_ID')
AWS_SECRET_ACCESS_KEY = env('DJANGO_AWS_SECRET_ACCESS_KEY')
AWS_STORAGE_BUCKET_NAME = env('DJANGO_AWS_STORAGE_BUCKET_NAME')
AWS_AUTO_CREATE_BUCKET = True
AWS_QUERYSTRING_AUTH = False
AWS_S3_CALLING_FORMAT = OrdinaryCallingFormat()

# AWS cache settings, don't change unless you know what you're doing:
AWS_EXPIRY = 60 * 60 * 24 * 7  # one week, in seconds

# TODO See: https://github.com/jschneier/django-storages/issues/47
# Revert the following and use str after the above-mentioned bug is fixed in
# either django-storage-redux or boto
AWS_HEADERS = {
    'Cache-Control': six.b('max-age=%d, s-maxage=%d, must-revalidate' % (
        AWS_EXPIRY, AWS_EXPIRY))
}

# URL that handles the media served from MEDIA_ROOT, used for managing
# stored files.
MEDIA_URL = 'https://s3.amazonaws.com/%s/' % AWS_STORAGE_BUCKET_NAME

# Static Assets
# ------------------------
# Static files share the same S3 bucket/URL as media files.
STATICFILES_STORAGE = DEFAULT_FILE_STORAGE
STATIC_URL = MEDIA_URL

# See: https://github.com/antonagestam/collectfast
# For Django 1.7+, 'collectfast' should come before
# 'django.contrib.staticfiles'
AWS_PRELOAD_METADATA = True
INSTALLED_APPS = ('collectfast', ) + INSTALLED_APPS

# EMAIL
# ------------------------------------------------------------------------------
DEFAULT_FROM_EMAIL = env('DJANGO_DEFAULT_FROM_EMAIL',
                         default='Reddit Clone <noreply@myreddit.com>')
EMAIL_BACKEND = 'django_mailgun.MailgunBackend'
MAILGUN_ACCESS_KEY = env('DJANGO_MAILGUN_API_KEY')
MAILGUN_SERVER_NAME = env('DJANGO_MAILGUN_SERVER_NAME')
EMAIL_SUBJECT_PREFIX = env("DJANGO_EMAIL_SUBJECT_PREFIX", default='[Reddit Clone] ')
SERVER_EMAIL = env('DJANGO_SERVER_EMAIL', default=DEFAULT_FROM_EMAIL)

# TEMPLATE CONFIGURATION
# ------------------------------------------------------------------------------
# Cache compiled templates in production.
# See:
# https://docs.djangoproject.com/en/dev/ref/templates/api/#django.template.loaders.cached.Loader
TEMPLATES[0]['OPTIONS']['loaders'] = [
    ('django.template.loaders.cached.Loader', [
        'django.template.loaders.filesystem.Loader', 'django.template.loaders.app_directories.Loader', ]),
]

# DATABASE CONFIGURATION
# ------------------------------------------------------------------------------
# Raises ImproperlyConfigured exception if DATABASE_URL not in os.environ
DATABASES['default'] = env.db("DATABASE_URL")

# CACHING
# ------------------------------------------------------------------------------
# Heroku URL does not pass the DB number, so we parse it in
# (the trailing 0 below is the Redis database number).
CACHES = {
    "default": {
        "BACKEND": "django_redis.cache.RedisCache",
        "LOCATION": "{0}/{1}".format(env.cache_url('REDIS_URL', default="redis://127.0.0.1:6379"), 0),
        "OPTIONS": {
            "CLIENT_CLASS": "django_redis.client.DefaultClient",
            "IGNORE_EXCEPTIONS": True,  # mimics memcache behavior.
                                        # http://niwinz.github.io/django-redis/latest/#_memcached_exceptions_behavior
        }
    }
}

# LOGGING CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#logging
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
    'version': 1,
    'disable_existing_loggers': False,
    'filters': {
        'require_debug_false': {
            '()': 'django.utils.log.RequireDebugFalse'
        }
    },
    'formatters': {
        'verbose': {
            'format': '%(levelname)s %(asctime)s %(module)s '
                      '%(process)d %(thread)d %(message)s'
        },
    },
    'handlers': {
        'mail_admins': {
            'level': 'ERROR',
            'filters': ['require_debug_false'],
            'class': 'django.utils.log.AdminEmailHandler'
        },
        'console': {
            'level': 'DEBUG',
            'class': 'logging.StreamHandler',
            'formatter': 'verbose',
        },
    },
    'loggers': {
        'django.request': {
            'handlers': ['mail_admins'],
            'level': 'ERROR',
            'propagate': True
        },
        'django.security.DisallowedHost': {
            'level': 'ERROR',
            'handlers': ['console', 'mail_admins'],
            'propagate': True
        }
    }
}

# Custom Admin URL, use {% url 'admin:index' %}
ADMIN_URL = env('DJANGO_ADMIN_URL')

# Your production stuff: Below this line define 3rd party library settings
| 35.105 | 117 | 0.626691 |
acf0b7c5c2badc332a5d522b543a050af68eb34b | 550 | py | Python | apps/challenges/migrations/0032_adds_featured_field_in_challenge.py | kaustubh-s1/EvalAI | 1884811e7759e0d095f7afb68188a7f010fa65dc | [
"BSD-3-Clause"
] | 1,470 | 2016-10-21T01:21:45.000Z | 2022-03-30T14:08:29.000Z | apps/challenges/migrations/0032_adds_featured_field_in_challenge.py | kaustubh-s1/EvalAI | 1884811e7759e0d095f7afb68188a7f010fa65dc | [
"BSD-3-Clause"
] | 2,594 | 2016-11-02T03:36:01.000Z | 2022-03-31T15:30:04.000Z | apps/challenges/migrations/0032_adds_featured_field_in_challenge.py | kaustubh-s1/EvalAI | 1884811e7759e0d095f7afb68188a7f010fa65dc | [
"BSD-3-Clause"
] | 865 | 2016-11-09T17:46:32.000Z | 2022-03-30T13:06:52.000Z | # -*- coding: utf-8 -*-
# Generated by Django 1.10.2 on 2017-09-29 07:09
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add an indexed boolean ``featured`` field (default False) to Challenge."""

    dependencies = [
        ("challenges", "0031_add_db_index_to_challenge_related_models")
    ]

    operations = [
        migrations.AddField(
            model_name="challenge",
            name="featured",
            field=models.BooleanField(
                db_index=True, default=False, verbose_name="Featured"
            ),
        )
    ]
| 23.913043 | 71 | 0.62 |
acf0b9802375efc7e7d05c5169df28913a2585d3 | 2,756 | py | Python | app/core/tests/test_models.py | Wang-Shiyu/recipe_app_api | 69d3f9c103864e1b8a98d474874b2af8383f515e | [
"MIT"
] | null | null | null | app/core/tests/test_models.py | Wang-Shiyu/recipe_app_api | 69d3f9c103864e1b8a98d474874b2af8383f515e | [
"MIT"
] | null | null | null | app/core/tests/test_models.py | Wang-Shiyu/recipe_app_api | 69d3f9c103864e1b8a98d474874b2af8383f515e | [
"MIT"
] | null | null | null | from unittest.mock import patch
from django.test import TestCase
from django.contrib.auth import get_user_model
from core import models
def sample_user(email='test@fii-na.com', password='testpass'):
    """Create and return a user for use as a test fixture."""
    user_model = get_user_model()
    return user_model.objects.create_user(email, password)
class ModelTests(TestCase):
    """Unit tests for the core model layer."""

    def test_create_user_with_email_successful(self):
        """A user can be created from an email address and a password."""
        address = 'test@fii-na.com'
        secret = "Testpass123!"
        user_model = get_user_model()
        created = user_model.objects.create_user(
            email=address,
            password=secret,
        )
        self.assertEqual(created.email, address)
        self.assertTrue(created.check_password(secret))

    def test_new_user_email_normalized(self):
        """The domain part of a new user's email is lower-cased."""
        mixed_case = 'test@FII-NA.COM'
        created = get_user_model().objects.create_user(mixed_case, 'test123')
        self.assertEqual(created.email, mixed_case.lower())

    def test_new_user_invalid_email(self):
        """Creating a user with no email raises ValueError."""
        with self.assertRaises(ValueError):
            get_user_model().objects.create_user(None, 'test123')

    def test_create_new_superuser(self):
        """create_superuser sets both the superuser and staff flags."""
        superuser = get_user_model().objects.create_superuser(
            'test@fii-na.com',
            'test123',
        )
        self.assertTrue(superuser.is_superuser)
        self.assertTrue(superuser.is_staff)

    # Tag model
    def test_tag_str(self):
        """str(tag) is the tag's name."""
        tag = models.Tag.objects.create(user=sample_user(), name='Vegan')
        self.assertEqual(str(tag), tag.name)

    # Ingredient model
    def test_ingredient_str(self):
        """str(ingredient) is the ingredient's name."""
        ingredient = models.Ingredient.objects.create(
            user=sample_user(),
            name='Cucumber',
        )
        self.assertEqual(str(ingredient), ingredient.name)

    # Recipe model
    def test_recipe_str(self):
        """str(recipe) is the recipe's title."""
        recipe = models.Recipe.objects.create(
            user=sample_user(),
            title='Steak and mushroom sauce',
            time_minutes=5,
            price=5.00,
        )
        self.assertEqual(str(recipe), recipe.title)

    @patch('uuid.uuid4')
    def test_recipe_file_name_uuid(self, mock_uuid):
        """Uploaded recipe images are renamed to <uuid>.jpg under uploads/recipe/."""
        fake_uuid = 'test-uuid'
        mock_uuid.return_value = fake_uuid
        generated = models.recipe_image_file_path(None, 'myimage.jpg')
        self.assertEqual(generated, 'uploads/recipe/{}.jpg'.format(fake_uuid))
| 29.956522 | 70 | 0.634253 |
acf0b9baeadb8166cdbb8dd5d9bfee07e5ca43b5 | 29,985 | py | Python | dynaphopy/interface/iofile/__init__.py | amosyang1986/DynaPhoPy | 3cab44e982efd02fdcc5f8b53a286186eb117c9e | [
"MIT"
] | 1 | 2018-03-28T19:19:17.000Z | 2018-03-28T19:19:17.000Z | dynaphopy/interface/iofile/__init__.py | amosyang1986/DynaPhoPy | 3cab44e982efd02fdcc5f8b53a286186eb117c9e | [
"MIT"
] | null | null | null | dynaphopy/interface/iofile/__init__.py | amosyang1986/DynaPhoPy | 3cab44e982efd02fdcc5f8b53a286186eb117c9e | [
"MIT"
] | null | null | null | import mmap
import os
import numpy as np
import dynaphopy.dynamics as dyn
import dynaphopy.atoms as atomtest
from dynaphopy.interface import phonopy_link as pho_interface
def diff_matrix(array_1, array_2, cell_size):
    """Difference between two sets of supercell scaled positions.

    Both arrays are first normalized by the supercell dimensions so that
    the returned differences account for cell periodicity.

    :param array_1: supercell scaled positions respect unit cell
    :param array_2: supercell scaled positions respect unit cell
    :param cell_size: supercell dimensions along each axis
    :return: element-wise difference (array_2 - array_1) in normalized units
    """
    scale = np.array(cell_size, dtype=float)
    normalized_1 = np.array(array_1) / scale[np.newaxis, :]
    normalized_2 = np.array(array_2) / scale[np.newaxis, :]
    return normalized_2 - normalized_1
def check_atoms_order(filename, trajectory_reading_function, structure):
    """Derive the atom reordering template from the first trajectory step.

    :param filename: trajectory file to read
    :param trajectory_reading_function: parser used to load the trajectory
    :param structure: unit cell structure the trajectory refers to
    :return: index template from get_correct_arrangement (or None on failure)
    """
    short_trajectory = trajectory_reading_function(filename,
                                                   structure=structure,
                                                   initial_cut=0,
                                                   end_cut=1)

    # average_positions() would be order independent, but it currently
    # depends on the atom order itself, so only the first snapshot is used.
    first_step = short_trajectory.trajectory[0]
    return get_correct_arrangement(first_step, structure)
def get_correct_arrangement(reference, structure):
    """Build a permutation template mapping the trajectory's atom order onto
    dynaphopy's internal supercell ordering (see dynaphopy_order).

    :param reference: Cartesian coordinates of one supercell snapshot
    :param structure: unit cell structure (dynaphopy Structure)
    :return: integer index template, or None if a unique matching failed
    """
    # Convert Cartesian coordinates to coordinates scaled by the unit cell.
    scaled_coordinates = []
    for coordinate in reference:
        trans = np.dot(coordinate, np.linalg.inv(structure.get_cell()))
        scaled_coordinates.append(np.array(trans.real, dtype=float))

    number_of_cell_atoms = structure.get_number_of_atoms()
    number_of_supercell_atoms = len(scaled_coordinates)
    # Supercell dimensions estimated from the mean scaled position
    # (assumes atoms evenly fill the supercell).
    supercell_dim = np.array([int(round(2*a+1)) for a in np.average(scaled_coordinates, axis=0)])-1

    # Fractional part of each coordinate: position inside its unit cell.
    unit_cell_scaled_coordinates = scaled_coordinates - np.array(scaled_coordinates, dtype=int)

    # For each supercell atom, find the closest basis atom of the unit cell.
    atom_unit_cell_index = []
    for coordinate in unit_cell_scaled_coordinates:
        # Only works for non symmetric cell (must be changed)
        diff = np.abs(np.array([coordinate]*number_of_cell_atoms) - structure.get_scaled_positions())
        # Wrap differences into [-0.5, 0.5) to honor periodic boundaries.
        diff[diff >= 0.5] -= 1.0
        diff[diff < -0.5] += 1.0
        index = np.argmin(np.linalg.norm(diff, axis=1))
        atom_unit_cell_index.append(index)
    atom_unit_cell_index = np.array(atom_unit_cell_index)

    # Lattice-point coordinates in dynaphopy's canonical ordering.
    original_conf = np.array([dynaphopy_order(j, supercell_dim)[:3] for j in range(number_of_supercell_atoms)])

    template = []
    lp_coordinates = []
    for i, coordinate in enumerate(scaled_coordinates):
        # Lattice point this atom sits on (atom position minus its basis position).
        lattice_points_coordinates = coordinate - structure.get_scaled_positions()[atom_unit_cell_index[i]]

        # Wrap the lattice point back inside the supercell along each axis.
        for k in range(3):
            if lattice_points_coordinates[k] > supercell_dim[k] - 0.5:
                lattice_points_coordinates[k] = lattice_points_coordinates[k] - supercell_dim[k]
            if lattice_points_coordinates[k] < -0.5:
                lattice_points_coordinates[k] = lattice_points_coordinates[k] + supercell_dim[k]

        # Match this lattice point against every canonical lattice point.
        comparison_cell = np.array([lattice_points_coordinates]*number_of_supercell_atoms)
        diference = np.linalg.norm(diff_matrix(original_conf, comparison_cell, supercell_dim), axis=1)
        # NOTE(review): '/' below is Python 2 integer division; under Python 3
        # this yields a float index — verify against the supported interpreter.
        template.append(np.argmin(diference) + atom_unit_cell_index[i]*number_of_supercell_atoms/number_of_cell_atoms)
        lp_coordinates.append(lattice_points_coordinates)
    template = np.array(template)

    # A valid template must be a permutation: every index appears once.
    if len(np.unique(template)) < len(template):
        print ('template failed, auto-order will not be applied')
        print ('unique: {} / {}'.format(len(np.unique(template)), len(template)))
        return None

    return template
def dynaphopy_order(i, size):
    """Return the (x, y, z, unit_cell_index) decomposition of supercell atom i.

    Atoms are ordered fastest along x, then y, then z, then by unit cell
    (basis atom) index.

    :param i: flat atom index within the supercell
    :param size: supercell dimensions (len-3 sequence of ints)
    :return: numpy integer array [x, y, z, k]
    """
    # Use floor division ('//'): the original '/' was Python 2 integer
    # division and returned floats for y, z and k under Python 3.
    x = np.mod(i, size[0])
    y = np.mod(i, size[0] * size[1]) // size[1]
    z = np.mod(i, size[0] * size[1] * size[2]) // (size[1] * size[0])
    k = i // (size[1] * size[0] * size[2])

    return np.array([x, y, z, k])
def get_trajectory_parser(file_name, bytes_to_check=1000000):
    """Detect the MD trajectory format of a file and return its parser.

    The first bytes_to_check bytes are scanned for format-specific keywords.

    :param file_name: path to the trajectory file
    :param bytes_to_check: maximum number of bytes to scan
    :return: parser function from trajectory_parsers, or None if unknown
    """
    from dynaphopy.interface.iofile import trajectory_parsers as tp

    parsers_keywords = {'vasp_outcar': {'function': tp.read_vasp_trajectory,
                                        'keywords': ['NIONS', 'POMASS', 'direct lattice vectors']},
                        'lammps_dump': {'function': tp.read_lammps_trajectory,
                                        'keywords': ['ITEM: TIMESTEP', 'ITEM: NUMBER OF ATOMS', 'ITEM: BOX BOUNDS']},
                        'vasp_xdatcar': {'function': tp.read_VASP_XDATCAR,
                                         'keywords': ['Direct configuration=', 'Direct configuration=', 'Direct configuration=']}}

    # Check file exists
    if not os.path.isfile(file_name):
        print (file_name + ' file does not exist')
        exit()

    file_size = os.stat(file_name).st_size

    # A format matches only when every one of its keywords is found.
    for parser in parsers_keywords.values():
        with open(file_name, "r+b") as f:
            file_map = mmap.mmap(f.fileno(), np.min([bytes_to_check, file_size]))
            found_all = all(file_map.find(keyword.encode()) != -1
                            for keyword in list(parser['keywords']))
        if found_all:
            return parser['function']

    return None
def read_from_file_structure_outcar(file_name):
    """Read an atomic structure (cell, masses, positions) from a VASP OUTCAR.

    The file is memory-mapped and parsed by seeking to known OUTCAR markers.

    :param file_name: path to the OUTCAR file
    :return: dynaphopy Structure built from the parsed data
    """
    # Check file exists
    if not os.path.isfile(file_name):
        print('Structure file does not exist!')
        exit()

    # Read from VASP OUTCAR file
    print('Reading VASP structure')

    with open(file_name, "r+b") as f:
        # memory-map the file
        file_map = mmap.mmap(f.fileno(), 0)

        # Setting number of dimensions
        number_of_dimensions = 3

        # trash reading for guessing primitive cell (Not stable)
        if False:
            # Reading primitive cell (not sure about this, by default disabled)
            position_number = file_map.find(b'PRICEL')
            file_map.seek(position_number)
            position_number = file_map.find(b'A1')
            file_map.seek(position_number)

            primitive_cell = []    # Primitive Cell
            for i in range (number_of_dimensions):
                primitive_cell.append(file_map.readline()
                                      .replace(",", "")
                                      .replace(")", "")
                                      .replace(")","")
                                      .split()[3:number_of_dimensions+3])
            primitive_cell = np.array(primitive_cell,dtype="double")

        # Reading number of atoms (after the 'NIONS =' marker)
        position_number = file_map.find(b'NIONS =')
        file_map.seek(position_number+7)
        number_of_atoms = int(file_map.readline())

        # Reading atoms per type
        position_number = file_map.find(b'ions per type')
        file_map.seek(position_number+15)
        atoms_per_type = np.array(file_map.readline().split(),dtype=int)

        # Reading atom masses: fixed-width 6-character POMASS fields,
        # then expanded so each atom carries its own mass.
        position_number = file_map.find(b'POMASS =')
        atomic_mass_per_type = []
        for i in range(atoms_per_type.shape[0]):
            file_map.seek(position_number+9+6*i)
            atomic_mass_per_type.append(file_map.read(6))
        atomic_mass = sum([[atomic_mass_per_type[j]
                            for i in range(atoms_per_type[j])]
                           for j in range(atoms_per_type.shape[0])],[])
        atomic_mass = np.array(atomic_mass,dtype='double')

        # Reading cell: the 3 lines after 'direct lattice vectors' hold the
        # direct cell in the first 3 columns and the reciprocal cell in the
        # next 3 columns.
        position_number = file_map.find(b'direct lattice vectors')
        file_map.seek(position_number)
        file_map.readline()
        direct_cell = []    # Direct Cell
        for i in range (number_of_dimensions):
            direct_cell.append(file_map.readline().split()[0:number_of_dimensions])
        direct_cell = np.array(direct_cell,dtype='double')

        file_map.seek(position_number)
        file_map.readline()
        reciprocal_cell = []    # Reciprocal cell
        for i in range (number_of_dimensions):
            reciprocal_cell.append(file_map.readline().split()[number_of_dimensions:number_of_dimensions*2])
        reciprocal_cell = np.array(reciprocal_cell,dtype='double')

        # Reading positions in fractional coordinates
        # (parsed but not passed to the Structure below).
        position_number=file_map.find(b'position of ions in fractional coordinates')
        file_map.seek(position_number)
        file_map.readline()

        positions_fractional = []
        for i in range (number_of_atoms):
            positions_fractional.append(file_map.readline().split()[0:number_of_dimensions])
        positions_fractional = np.array(positions_fractional,dtype='double')

        # Reading positions in Cartesian coordinates
        position_number=file_map.find(b'position of ions in cartesian coordinates')
        file_map.seek(position_number)
        file_map.readline()

        positions = []
        for i in range (number_of_atoms):
            positions.append(file_map.readline().split()[0:3])
        positions = np.array(positions,dtype='double')

        file_map.close()

    return atomtest.Structure(cell= direct_cell,
                              positions=positions,
                              masses=atomic_mass,
                              )
def read_from_file_structure_poscar(file_name, number_of_dimensions=3):
    """Read an atomic structure from a VASP POSCAR file.

    Handles both the VASP 5 format (element symbols on line 6) and the old
    VASP 4 format (symbols on the comment line), detected via the
    ValueError fallback below.

    :param file_name: path to the POSCAR file
    :param number_of_dimensions: spatial dimensions (default 3)
    :return: dynaphopy Structure with either scaled or Cartesian positions set
    """
    # Check file exists
    if not os.path.isfile(file_name):
        print('Structure file does not exist!')
        exit()

    # Read from VASP POSCAR file
    print("Reading VASP POSCAR structure")
    poscar_file = open(file_name, 'r')
    data_lines = poscar_file.read().split('\n')
    poscar_file.close()

    # Line 2 holds a global scaling factor applied to the lattice vectors.
    multiply = float(data_lines[1])
    direct_cell = np.array([data_lines[i].split()
                            for i in range(2, 2+number_of_dimensions)], dtype=float)
    direct_cell *= multiply
    scaled_positions = None
    positions = None

    try:
        # VASP 5 format: atom counts on line 7, coordinate mode on line 8.
        number_of_types = np.array(data_lines[3+number_of_dimensions].split(),dtype=int)

        # 'D'/'d' means Direct (scaled) coordinates; anything else Cartesian.
        coordinates_type = data_lines[4+number_of_dimensions][0]
        if coordinates_type == 'D' or coordinates_type == 'd' :
            scaled_positions = np.array([data_lines[8+k].split()[0:3]
                                         for k in range(np.sum(number_of_types))],dtype=float)
        else:
            positions = np.array([data_lines[8+k].split()[0:3]
                                  for k in range(np.sum(number_of_types))],dtype=float)

        # Expand element symbols so each atom has its own entry.
        atomic_types = []
        for i,j in enumerate(data_lines[5].split()):
            atomic_types.append([j]*number_of_types[i])
        atomic_types = [item for sublist in atomic_types for item in sublist]

    # Old style (VASP 4) POSCAR format: counts on line 6, mode on line 7,
    # element symbols taken from the title line.
    except ValueError:
        print ("Reading old style POSCAR")
        number_of_types = np.array(data_lines[5].split(), dtype=int)
        coordinates_type = data_lines[6][0]
        if coordinates_type == 'D' or coordinates_type == 'd':
            scaled_positions = np.array([data_lines[7+k].split()[0:3]
                                         for k in range(np.sum(number_of_types))], dtype=float)
        else:
            positions = np.array([data_lines[7+k].split()[0:3]
                                  for k in range(np.sum(number_of_types))], dtype=float)

        atomic_types = []
        for i,j in enumerate(data_lines[0].split()):
            atomic_types.append([j]*number_of_types[i])
        atomic_types = [item for sublist in atomic_types for item in sublist]

    return atomtest.Structure(cell= direct_cell,    # cell_matrix, lattice vectors in rows
                              scaled_positions=scaled_positions,
                              positions=positions,
                              atomic_elements=atomic_types,
                              )
# Just for testing (use with care): generates a harmonic trajectory using the harmonic eigenvectors.
# All phonons are set to have the same phase, defined by phase_0. The amplitude of each phonon mode is
# adjusted so that all modes carry the same energy. This amplitude is given in temperature units, assuming
# that the phonon energy follows a Maxwell-Boltzmann distribution.
def generate_test_trajectory(structure, supercell=(1, 1, 1),
                             minimum_frequency=0.1,  # THz
                             total_time=2,           # picoseconds
                             time_step=0.002,        # picoseconds
                             temperature=400,        # Kelvin
                             silent=False,
                             memmap=False,
                             phase_0=0.0):
    """Generate an ideal harmonic trajectory for testing (use with care).

    Atom displacements are built as a superposition of the harmonic phonon
    modes at all commensurate q-points of the requested supercell, with
    amplitudes chosen so every mode carries the same energy at the given
    temperature and a common initial phase phase_0.

    :param structure: dynaphopy Structure with phonopy data attached
    :param supercell: supercell dimensions used to build the trajectory
    :param minimum_frequency: modes below this frequency (THz) are skipped
    :param total_time: trajectory length in picoseconds
    :param time_step: time step in picoseconds
    :param temperature: target temperature in Kelvin
    :param silent: suppress the progress bar if True
    :param memmap: passed through to the Dynamics object
    :param phase_0: common initial phase of all modes
    :return: dyn.Dynamics object with the synthetic trajectory
    """
    import random
    from dynaphopy.power_spectrum import _progress_bar

    print('Generating ideal harmonic data for testing')
    kb_boltzmann = 0.831446  # u * A^2 / ( ps^2 * K )

    number_of_unit_cells_phonopy = np.prod(np.diag(structure.get_supercell_phonon()))
    number_of_unit_cells = np.prod(supercell)
    # atoms_relation = float(number_of_unit_cells)/ number_of_unit_cells_phonopy

    # Recover dump trajectory from file (test only, disabled)
    import pickle
    if False:
        dump_file = open( "trajectory.save", "r" )
        trajectory = pickle.load(dump_file)
        return trajectory

    number_of_atoms = structure.get_number_of_cell_atoms()
    number_of_primitive_atoms = structure.get_number_of_primitive_atoms()
    number_of_dimensions = structure.get_number_of_dimensions()

    positions = structure.get_positions(supercell=supercell)
    masses = structure.get_masses(supercell=supercell)

    number_of_atoms = number_of_atoms*number_of_unit_cells
    # NOTE(review): '/' is Python 2 integer division here; under Python 3
    # this becomes a float — verify against the supported interpreter.
    number_of_primitive_cells = number_of_atoms/number_of_primitive_atoms

    atom_type = structure.get_atom_type_index(supercell=supercell)

    # Generate additional wave vectors sample (commensurate q-points)
    q_vector_list = pho_interface.get_commensurate_points(structure, np.diag(supercell))

    # Same q-points in Cartesian reciprocal coordinates.
    q_vector_list_cart = [ np.dot(q_vector, 2*np.pi*np.linalg.inv(structure.get_primitive_cell()).T)
                           for q_vector in q_vector_list]

    atoms_relation = float(len(q_vector_list)*number_of_primitive_atoms)/number_of_atoms

    # Generate frequencies and eigenvectors for the testing wave vector samples
    print('Wave vectors included in test (commensurate points)')
    eigenvectors_r = []
    frequencies_r = []
    for i in range(len(q_vector_list)):
        print(q_vector_list[i])
        eigenvectors, frequencies = pho_interface.obtain_eigenvectors_and_frequencies(structure, q_vector_list[i])
        eigenvectors_r.append(eigenvectors)
        frequencies_r.append(frequencies)
    number_of_frequencies = len(frequencies_r[0])

    # Generating trajectory
    if not silent:
        _progress_bar(0, 'generating')

    trajectory = []
    for time in np.arange(total_time, step=time_step):
        # Start from the equilibrium positions and add every mode's displacement.
        coordinates = np.array(positions[:, :], dtype=complex)
        for i_freq in range(number_of_frequencies):
            for i_long, q_vector in enumerate(q_vector_list_cart):
                if abs(frequencies_r[i_long][i_freq]) > minimum_frequency:  # Prevent error due to small frequencies
                    # Amplitude chosen so each mode carries the same energy
                    # (equipartition at the requested temperature).
                    # NOTE(review): np.complex was removed in NumPy 1.20+;
                    # complex(0, -1) is required on modern NumPy — verify.
                    amplitude = np.sqrt(number_of_dimensions * kb_boltzmann * temperature / number_of_primitive_cells * atoms_relation)/(frequencies_r[i_long][i_freq] * 2 * np.pi)
                    normal_mode = amplitude * np.exp(np.complex(0, -1) * frequencies_r[i_long][i_freq] * 2.0 * np.pi * time)
                    phase = np.exp(np.complex(0, 1) * np.dot(q_vector, positions.T) + phase_0)

                    coordinates += (1.0 / np.sqrt(masses)[None].T *
                                    eigenvectors_r[i_long][i_freq, atom_type] *
                                    phase[None].T *
                                    normal_mode).real

        trajectory.append(coordinates)
        if not silent:
            _progress_bar(float(time + time_step) / total_time, 'generating', )

    trajectory = np.array(trajectory)

    time = np.array([i * time_step for i in range(trajectory.shape[0])], dtype=float)
    # Constant classical energy: N * dim * kB * T at every step.
    energy = np.array([number_of_atoms * number_of_dimensions *
                       kb_boltzmann * temperature
                       for i in range(trajectory.shape[0])], dtype=float)

    # Save a trajectory object to file for later recovery (test only, disabled)
    if False:
        dump_file = open("trajectory.save", "w")
        pickle.dump(dyn.Dynamics(structure=structure,
                                 trajectory=np.array(trajectory, dtype=complex),
                                 energy=np.array(energy),
                                 time=time,
                                 supercell=np.dot(np.diagflat(supercell), structure.get_cell())),
                    dump_file)
        dump_file.close()

    return dyn.Dynamics(structure=structure,
                        trajectory=np.array(trajectory,dtype=complex),
                        energy=np.array(energy),
                        time=time,
                        supercell=np.dot(np.diagflat(supercell), structure.get_cell()),
                        memmap=memmap)
#Testing function
def read_from_file_test():
    """Read a 2D test trajectory from hard-coded files under 'Data Files/'.

    Testing-only helper: test.out holds positions and atom types, test2.out
    velocities and test3.out the trajectory, both written as Mathematica-style
    complex numbers that are translated to Python complex literals.

    :return: dyn.Dynamics built from the test data
    """
    print('Reading structure from test file')

    # Test conditions (translated from: "Condicions del test")
    number_of_dimensions = 2

    f_coordinates = open('Data Files/test.out', 'r')
    f_velocity = open('Data Files/test2.out', 'r')
    f_trajectory = open('Data Files/test3.out', 'r')

    # Coordinates reading: each row is [x, y, atom_type]
    positions = []
    while True:
        row = f_coordinates.readline().split()
        if not row: break
        for i in range(len(row)): row[i] = float(row[i])
        positions.append(row)

    atom_type = np.array(positions,dtype=int)[:, 2]
    positions = np.array(positions)[:,:number_of_dimensions]
    print('Coordinates reading complete')

    structure = atomtest.Structure(positions=positions,
                                   atomic_numbers=atom_type,
                                   cell=[[2,0],[0,1]],
                                   masses=[1] * positions.shape[0])  # all masses set to 1

    number_of_atoms = structure.get_number_of_atoms()
    structure.set_number_of_primitive_atoms(2)
    print('number of atoms in primitive cell')
    print(structure.get_number_of_primitive_atoms())
    print('number of total atoms in structure (super cell)')
    print(number_of_atoms)

    # Velocity reading section: Mathematica complex syntax
    # (I, *, ^) is rewritten into Python complex literals.
    velocity = []
    while True:
        row = f_velocity.readline().replace('I','j').replace('*','').replace('^','E').split()
        if not row: break
        for i in range(len(row)): row[i] = complex('('+row[i]+')')
        velocity.append(row)

    # First column is the time stamp; the rest are per-atom components.
    time = np.array([velocity[i][0] for i in range(len(velocity))]).real
    velocity = np.array([[[velocity[i][j*number_of_dimensions+k+1]
                           for k in range(number_of_dimensions)]
                          for j in range(number_of_atoms)]
                         for i in range (len(velocity))])
    print('Velocity reading complete')

    # Trajectory reading (same format as velocities)
    trajectory = []
    while True:
        row = f_trajectory.readline().replace('I','j').replace('*','').replace('^','E').split()
        if not row: break
        for i in range(len(row)): row[i] = complex('('+row[i]+')')
        trajectory.append(row)

    trajectory = np.array([[[trajectory[i][j*number_of_dimensions+k+1]
                             for k in range(number_of_dimensions)]
                            for j in range(number_of_atoms)]
                           for i in range (len(trajectory))])
    print('Trajectory reading complete')

    return dyn.Dynamics(trajectory=trajectory,
                        # velocity=velocity,
                        time=time,
                        structure=structure)
def write_curve_to_file(frequency_range, curve_matrix, file_name):
    """Write one or more curves to a tab-separated text file.

    Each output line holds a frequency (fixed-point) followed by the
    corresponding row of curve_matrix in scientific notation.

    :param frequency_range: sequence of frequency values (one per row)
    :param curve_matrix: 2D array of curve values, shape (n_points, n_curves)
    :param file_name: output file path
    :return: 0 (kept for backward compatibility with existing callers)
    """
    # 'with' guarantees the file is closed even if formatting raises
    # (the original left the handle open on error).
    with open(file_name, 'w') as output_file:
        for i in range(curve_matrix.shape[0]):
            output_file.write("{0:10.4f}\t".format(frequency_range[i]))
            for value in curve_matrix[i, :]:
                output_file.write("{0:.10e}\t".format(value))
            output_file.write("\n")

    return 0
def read_parameters_from_input_file(file_name, number_of_dimensions=3):
    """Parse a dynaphopy input file into a parameters dictionary.

    Recognized section keywords: STRUCTURE FILE OUTCAR/POSCAR, FORCE SETS,
    FORCE CONSTANTS, PRIMITIVE MATRIX, SUPERCELL MATRIX, BANDS, MESH PHONOPY.
    Each keyword's data is read from the line(s) that follow it.

    :param file_name: path to the input file
    :param number_of_dimensions: matrix dimension for PRIMITIVE/SUPERCELL (default 3)
    :return: dict of parsed parameters (structure_file_name_poscar defaults to 'POSCAR')
    """
    input_parameters = {'structure_file_name_poscar': 'POSCAR'}

    # Check file exists
    if not os.path.isfile(file_name):
        print (file_name + ' file does not exist')
        exit()

    input_file = open(file_name, "r").readlines()
    for i, line in enumerate(input_file):
        # Skip comment lines
        if line[0] == '#':
            continue

        if "STRUCTURE FILE OUTCAR" in line:
            input_parameters.update({'structure_file_name_outcar': input_file[i+1].replace('\n','').strip()})

        if "STRUCTURE FILE POSCAR" in line:
            input_parameters.update({'structure_file_name_poscar': input_file[i+1].replace('\n','').strip()})

        if "FORCE SETS" in line:
            input_parameters.update({'force_sets_file_name': input_file[i+1].replace('\n','').strip()})

        if "FORCE CONSTANTS" in line:
            input_parameters.update({'force_constants_file_name': input_file[i+1].replace('\n','').strip()})

        if "PRIMITIVE MATRIX" in line:
            # The next number_of_dimensions lines hold the matrix rows.
            primitive_matrix = [input_file[i+j+1].replace('\n','').split() for j in range(number_of_dimensions)]
            input_parameters.update({'_primitive_matrix': np.array(primitive_matrix, dtype=float)})

        if "SUPERCELL MATRIX" in line:
            super_cell_matrix = [input_file[i+j+1].replace('\n','').split() for j in range(number_of_dimensions)]
            super_cell_matrix = np.array(super_cell_matrix, dtype=int)
            input_parameters.update({'supercell_phonon': np.array(super_cell_matrix, dtype=int)})

        if "BANDS" in line:
            # Read consecutive band lines until one fails to parse as a 2x3
            # matrix.  Each line may carry an optional ':LABEL1,LABEL2' suffix.
            bands = []
            labels = []
            while i < len(input_file)-1:
                line = input_file[i + 1].replace('\n', '')
                try:
                    labels.append(line.split(':')[1].replace('\n','').split(','))
                    line = line.split(':')[0]
                # NOTE(review): bare except silently swallows any error while
                # extracting labels (an IndexError when no ':' is present is
                # expected, but other errors are hidden too).
                except:
                    pass
                try:
                    band = np.array(line.replace(',',' ').split(), dtype=float).reshape((2,3))
                except IOError:
                    break
                except ValueError:
                    break
                i += 1
                bands.append(band)
            labels = [(label[0].replace(' ',''), label[1].replace(' ','')) for label in labels]
            if labels != []:
                input_parameters.update({'_band_ranges': {'ranges': bands,
                                                          'labels': labels}})
            else:
                input_parameters.update({'_band_ranges': {'ranges':bands}})

        if "MESH PHONOPY" in line:
            input_parameters.update({'_mesh_phonopy': np.array(input_file[i+1].replace('\n','').split(),dtype=int)})

    return input_parameters
def write_xsf_file(file_name, structure):
    """Write the primitive structure to an XSF (XCrySDen) file.

    :param file_name: output file path
    :param structure: dynaphopy Structure providing cells, types and positions
    """
    # 'with' guarantees the file is closed even if structure accessors raise
    # (the original left the handle open on error).
    with open(file_name, "w") as xsf_file:
        xsf_file.write("CRYSTAL\n")
        xsf_file.write("PRIMVEC\n")
        for row in structure.get_primitive_cell():
            xsf_file.write("{0:10.4f}\t{1:10.4f}\t{2:10.4f}\n".format(*row))
        xsf_file.write("CONVVEC\n")
        for row in structure.get_cell():
            xsf_file.write("{0:10.4f}\t{1:10.4f}\t{2:10.4f}\n".format(*row))
        xsf_file.write("PRIMCOORD\n")
        xsf_file.write("{0:10d} {1:10d}\n".format(structure.get_number_of_primitive_atoms(), 1))

        # Write one representative atom per atom type, in type-index order.
        counter = 0
        while counter < structure.get_number_of_atom_types():
            for i, value_type in enumerate(structure.get_atom_type_index()):
                if value_type == counter:
                    xsf_file.write("{0:4d}\t{1:10.4f}\t{2:10.4f}\t{3:10.4f}\n".format(
                        structure.get_atomic_numbers()[i],
                        *structure.get_positions()[i]))
                    counter += 1
                    break
# Save & load HDF5 data file
def save_data_hdf5(file_name, time, super_cell, trajectory=None, velocity=None, vc=None, reduced_q_vector=None):
    """Save simulation data to an HDF5 file.

    time and super_cell are always written; trajectory, velocity, vc and
    reduced_q_vector are written only when provided.
    """
    import h5py

    hdf5_file = h5py.File(file_name, "w")

    # Optional datasets, written in a fixed order when present.
    optional_datasets = (('trajectory', trajectory),
                         ('velocity', velocity),
                         ('vc', vc),
                         ('reduced_q_vector', reduced_q_vector))
    for dataset_name, dataset in optional_datasets:
        if dataset is not None:
            hdf5_file.create_dataset(dataset_name, data=dataset)

    hdf5_file.create_dataset('time', data=time)
    hdf5_file.create_dataset('super_cell', data=super_cell)

    hdf5_file.close()
def initialize_from_hdf5_file(file_name, structure, read_trajectory=True, initial_cut=1, final_cut=None, memmap=False):
    """Load simulation data from an HDF5 file produced by save_data_hdf5.

    :param file_name: path to the HDF5 file
    :param structure: dynaphopy Structure the data refers to
    :param read_trajectory: skip reading the trajectory dataset when False
    :param initial_cut: 1-based index of the first step to keep
    :param final_cut: last step to keep (None keeps all remaining steps)
    :param memmap: passed through to the Dynamics object
    :return: dyn.Dynamics, or (vc, reduced_q_vector, Dynamics) when the file
             contains a projected-velocity ('vc') dataset
    """
    import h5py

    print("Reading data from hdf5 file: " + file_name)

    trajectory = None
    velocity = None
    vc = None
    reduced_q_vector = None

    # Check file exists
    if not os.path.isfile(file_name):
        print(file_name + ' file does not exist!')
        exit()

    hdf5_file = h5py.File(file_name, "r")
    if "trajectory" in hdf5_file and read_trajectory is True:
        trajectory = hdf5_file['trajectory'][:]
        # initial_cut is 1-based, hence the -1 offset.
        if final_cut is not None:
            trajectory = trajectory[initial_cut-1:final_cut]
        else:
            trajectory = trajectory[initial_cut-1:]

    if "velocity" in hdf5_file:
        velocity = hdf5_file['velocity'][:]
        if final_cut is not None:
            velocity = velocity[initial_cut-1:final_cut]
        else:
            velocity = velocity[initial_cut-1:]

    if "vc" in hdf5_file:
        vc = hdf5_file['vc'][:]
        if final_cut is not None:
            vc = vc[initial_cut-1:final_cut]
        else:
            vc = vc[initial_cut-1:]

    if "reduced_q_vector" in hdf5_file:
        reduced_q_vector = hdf5_file['reduced_q_vector'][:]
        print("Load trajectory projected onto {0}".format(reduced_q_vector))

    time = hdf5_file['time'][:]
    supercell = hdf5_file['super_cell'][:]
    hdf5_file.close()

    if vc is None:
        return dyn.Dynamics(structure=structure,
                            trajectory=trajectory,
                            velocity=velocity,
                            time=time,
                            supercell=np.dot(np.diagflat(supercell), structure.get_cell()),
                            memmap=memmap)
    else:
        # NOTE(review): in this branch any loaded trajectory/velocity is
        # discarded and only vc is returned alongside the Dynamics — verify
        # this is intentional.
        return vc, reduced_q_vector, dyn.Dynamics(structure=structure,
                                                  time=time,
                                                  supercell=np.dot(np.diagflat(supercell), structure.get_cell()),
                                                  memmap=memmap)
def save_quasiparticle_data_to_file(quasiparticle_data, filename):
    """Dump per-q-point quasiparticle data (frequencies, linewidths, shifts)
    to a YAML file, rendering floats with a fixed 8-decimal format."""
    import yaml

    def float_representer(dumper, value):
        # Force a stable fixed-point rendering instead of YAML's default.
        return dumper.represent_scalar(u'tag:yaml.org,2002:float', '{0:.8f}'.format(value))

    yaml.add_representer(float, float_representer)

    entries = [
        {'reduced_wave_vector': q_point.tolist(),
         'frequencies': quasiparticle_data['frequencies'][i].tolist(),
         'linewidths': quasiparticle_data['linewidths'][i].tolist(),
         'frequency_shifts': quasiparticle_data['frequency_shifts'][i].tolist()}
        for i, q_point in enumerate(quasiparticle_data['q_points'])
    ]

    with open(filename, 'w') as outfile:
        yaml.dump(entries, outfile, default_flow_style=False)
def save_bands_data_to_file(bands_data, filename):
    """Dump band-structure data to a YAML file, rendering floats with a
    fixed 8-decimal format (same representer as the quasiparticle dump)."""
    import yaml

    def float_representer(dumper, value):
        return dumper.represent_scalar(u'tag:yaml.org,2002:float', '{0:.8f}'.format(value))

    yaml.add_representer(float, float_representer)
    with open(filename, 'w') as outfile:
        yaml.dump(bands_data, outfile, default_flow_style=False)
acf0babee62650794c7145a4907670c91477a8cc | 169 | py | Python | create_data_lists.py | bely66/Animal_Detection_SSD | 938a01a64ae6ba03ff037bb7d71e1763d1b04664 | [
"MIT"
] | null | null | null | create_data_lists.py | bely66/Animal_Detection_SSD | 938a01a64ae6ba03ff037bb7d71e1763d1b04664 | [
"MIT"
] | null | null | null | create_data_lists.py | bely66/Animal_Detection_SSD | 938a01a64ae6ba03ff037bb7d71e1763d1b04664 | [
"MIT"
] | null | null | null | from utils import create_data_lists
# Script entry point: build the image/label list files used for training.
# Paths are hard-coded for the Boxing_KNPS dataset; list files are written
# to the current directory.
if __name__ == '__main__':
    create_data_lists(animal_path='Boxing_KNPS_image/Labels/',
                      output_folder='./')
| 28.166667 | 62 | 0.674556 |
acf0bb809a0798b907cd2af0019d91ee525c1641 | 1,261 | py | Python | app/auth/forms.py | monicaoyugi/Pitch | 9ac865760884dd4514b60a415ce40ef22aa673ac | [
"MIT"
] | null | null | null | app/auth/forms.py | monicaoyugi/Pitch | 9ac865760884dd4514b60a415ce40ef22aa673ac | [
"MIT"
] | null | null | null | app/auth/forms.py | monicaoyugi/Pitch | 9ac865760884dd4514b60a415ce40ef22aa673ac | [
"MIT"
] | 1 | 2020-09-28T06:28:48.000Z | 2020-09-28T06:28:48.000Z | from flask_wtf import FlaskForm
from wtforms import StringField,PasswordField,SubmitField,BooleanField,ValidationError
from wtforms.validators import Required,Email,EqualTo
from ..models import User
class RegistrationForm(FlaskForm):
    """Sign-up form: collects a unique username/email and a confirmed password."""
    username = StringField('Username', validators = [Required()])
    email = StringField(' Email Address', validators = [Required(), Email()])
    # The password must match the confirmation field below.
    password = PasswordField('Password', validators = [Required(),
    EqualTo('password_confirm', message = 'Passwords must match')])
    password_confirm = PasswordField('Confirm Passwords', validators = [Required()])
    submit = SubmitField('Sign Up')
    def validate_email(self, data_field):
        # WTForms invokes validate_<field> hooks automatically on submit;
        # reject email addresses that already have an account.
        if User.query.filter_by(email = data_field.data).first():
            raise ValidationError('There is an account with that email')
    def validate_username(self, data_field):
        # Reject usernames that already exist in the database.
        if User.query.filter_by(username = data_field.data).first():
            raise ValidationError('That username is taken')
class LoginForm(FlaskForm):
    """Login form: email/password credentials plus an optional remember-me flag."""
    email = StringField('Your Email Address', validators = [Required(), Email()])
    password = PasswordField('Password', validators = [Required()])
    # When checked, the login session persists beyond the browser session.
    remember = BooleanField('keep me logged?')
    submit = SubmitField('Sign In')
acf0bc8f83266abbe6d593522bcdfa2ede6f9fba | 5,892 | py | Python | dict_attack.py | nikramakrishnan/distributed-dictionary-attack | 12efedd7e42cade57791a35ea29047fc07eb2cc2 | [
"MIT"
] | null | null | null | dict_attack.py | nikramakrishnan/distributed-dictionary-attack | 12efedd7e42cade57791a35ea29047fc07eb2cc2 | [
"MIT"
] | null | null | null | dict_attack.py | nikramakrishnan/distributed-dictionary-attack | 12efedd7e42cade57791a35ea29047fc07eb2cc2 | [
"MIT"
] | 1 | 2019-10-16T17:30:18.000Z | 2019-10-16T17:30:18.000Z | #
# Copyright (C) 2019 by
# Nikhil Ramakrishnan
#
# This project is licensed under the MIT License
import getpass
import sys
import ftp
import sql
import utils
# Shared MySQL connection used by every session helper; stays None until
# connect() lazily opens it.
conn = None
def main():
    '''Show the banner and menu, then dispatch to the chosen action.'''
    print(utils.get_logo())
    print("Welcome to TensorBrute, NOOB.\n")
    print(utils.show_ops(), end='')
    choice = input().strip()
    # Menu option -> handler; anything else exits.
    actions = {
        '1': newSession,
        '2': existingSession,
        '3': cleanUp,
        '4': getDBinfo,
    }
    if choice in actions:
        actions[choice]()
    else:
        print("Exiting...")
def newSession():
    '''Start a new attack session.

    Prompts for the FTP target, registers a new session row in the shared
    database, and enters the attack loop.  Terminates the process (via
    end_session) when a session is already running, the FTP host is
    unreachable, or the session row cannot be created.
    '''
    global conn
    # connect to the DB if not already connected
    connect()
    # first check if any session is running
    sess = sql.check_running_sessions(conn)
    if sess is not None:
        print("Session",sess,"is already running... Try resuming.")
        end_session(2)
    # no sessions running - start one
    # generate an ID
    id = utils.gen_id()
    # get host IP and username from user
    host = input("FTP Host to attack: ")
    username = input("Target username: ")
    # check FTP server is reachable before registering the session
    res = ftp.check_ftp(host)
    if(res==0):
        end_session(3)
    # Start the session
    print(utils.sess_setup())
    done = sql.start_session(conn,id,host,username)
    if not done:
        end_session(5)
    # clean up the flags
    sql.clear_flags(conn)
    # OK - finally ready to start attack
    # print a nice message
    print(utils.attack_start())
    # attack!
    attack_prog(id,host,username)
def existingSession():
    '''Resume the attack for an already-running session.

    Looks up the running session in the shared database, restores its target
    host/username, and re-enters the attack loop.  Terminates the process
    when no session is currently running.
    '''
    global conn
    # connect to the DB if not already connected
    connect()
    # look for existing sessions
    sess = sql.check_running_sessions(conn)
    if sess is None:
        print(utils.no_session())
        end_session(2)
    # restore the stored target host/username from the database
    host,username = sql.get_session_data(conn,sess)
    # Ready to resume attack
    # print a nice message
    print(utils.attack_resume(host,username))
    # attack!
    attack_prog(sess,host,username)
def connect():
    '''Lazily open the shared MySQL connection; exit the program on failure.'''
    global conn
    if conn is not None:
        # Already connected: nothing to do.
        return
    print(utils.db_connect())
    conn = sql.mysql_connect(sql.host, sql.user, sql.paswd, sql.dbname)
    if conn is None:
        end_session(2)
def attack_prog(id,host,username):
    '''Start/resume an attack.

    Repeatedly claims a password from the shared database and tries it
    against the FTP server until the password is found (here or by another
    worker), the password list is exhausted, or the FTP server becomes
    unreachable.  Always terminates the process through end_session().
    '''
    global conn
    flag = False
    # attempt counter; a status update is printed every 5 attempts
    count = 0
    print(utils.progress_start())
    while True:
        # claim the next untried password
        passwd = sql.get_password(conn)
        if passwd is None:
            end_session(1,id)
        # attempt login
        status = ftp.check_cred(host,username,passwd)
        # status: 1 = success, None = FTP connection error
        if status==1:
            # this is it!
            flag = True
            break
        if status is None:
            end_session(3)
        # stop when another worker has already finished the session
        if not sql.is_session_running(conn,id):
            break
        # Print a status update every 5 connections
        if count%5 == 0:
            print(utils.status_update(passwd))
        count+=1
    if flag:
        # record the discovered password and report success
        sql.password_found(conn,id,passwd)
        print()
        print(utils.found_pass(host,username,passwd))
        end_session(0)
    else:
        # session stopped elsewhere: another worker likely found the password
        end_session(4)
    print()
def end_session(status, id=None):
    '''
    End Program with optional status code.
    Status = 0 : Success
    Status = 1 : No password found in the current database.
    Status = 2 : General exit.
    Status = 3 : Error connecting to FTP server.
    Status = 4 : Password found by this user or probably another user.
    Status = 5 : SQL error.
    '''
    global conn
    print()
    # Lines reported to the user per status code; status 2 prints nothing.
    status_messages = {
        0: ["Congratulations. Use this power responsibly ;)"],
        1: ["We have run out of passwords to try, unforunately :(",
            "Try another Database? :)"],
        3: ["Not able to connect to the FTP server. Check the IP/URL?"],
        4: ["Session ended. Another system probably found the password!"],
        5: ["SQL error."],
    }
    if status == 1:
        # Passwords exhausted: release any claimed-but-untried passwords.
        sql.rollback(conn, id)
    for message in status_messages.get(status, []):
        print(message)
    print("Goodbye!")
    sys.exit(status)
def cleanUp():
    '''Stop and delete any running session after an explicit confirmation.'''
    global conn
    print("\nAre you SURE you want to perform a clean up?")
    print("This will STOP and DELETE any running session.")
    answer = input("Type 'YES' to confirm: ")
    if answer.strip() != "YES":
        print("\nNo changes made. Exiting.")
        end_session(2)
        return
    # Confirmed: wipe the session tables and exit.
    connect()
    print(utils.clean_message())
    sql.clean(conn)
    end_session(2)
def getInfo(name, var, type=0):
    '''
    Prompt for a new value for a credential/setting.

    :param name: label shown in the prompt.
    :param var: current value; returned unchanged when the user keeps the
                default (empty input) or the password confirmation fails.
    :param type: 0 for a plain visible prompt, 1 for a hidden (getpass)
                 prompt with confirmation.
    :return: the new value, or ``var`` when the input is empty/invalid.
    '''
    if type == 0:
        print(name, " (", var, "): ", sep='', end='')
        temp_var = input()
        if temp_var.strip() != '':
            return temp_var.strip()
        # Bug fix: an empty answer previously fell through and returned None,
        # silently discarding the default; keep the current value instead.
        return var
    elif type == 1:
        temp_var = getpass.getpass(prompt=(name + ':'))
        confirm = getpass.getpass(prompt=("Confirm " + name + ':'))
        if temp_var == confirm:
            return temp_var
        print("\nPasswords do not match, try again.")
        return var
    # Unknown prompt type: keep the current value rather than returning None.
    return var
def getDBinfo():
    '''Interactively update the database credentials, then return to the menu.

    Empty answers are intended to keep the current defaults; the password is
    read through a hidden prompt with confirmation.
    '''
    print("\nEnter the following info (press enter to keep default):")
    sql.host = getInfo("Database Host",sql.host)
    sql.user = getInfo("DB Username",sql.user)
    # type=1 switches getInfo to a hidden (getpass) prompt with confirmation
    sql.paswd = getInfo("DB Password",sql.paswd,1)
    sql.dbname = getInfo("Database Name",sql.dbname)
    print("\n",utils.db_update_text,sep='')
    main()
# Script entry point.
if __name__=="__main__":
    main()
| 27.924171 | 75 | 0.603021 |
acf0bccb946516694072aee85893e814b5d061bd | 5,611 | py | Python | experiments/exp1c.py | jenninglim/multiscale-features | 54b3246cf138c9508e92f466e25cc4e778d0728a | [
"MIT"
] | 8 | 2020-01-23T21:08:53.000Z | 2021-07-17T04:44:26.000Z | experiments/exp1c.py | jenninglim/multiscale-features | 54b3246cf138c9508e92f466e25cc4e778d0728a | [
"MIT"
] | null | null | null | experiments/exp1c.py | jenninglim/multiscale-features | 54b3246cf138c9508e92f466e25cc4e778d0728a | [
"MIT"
] | 1 | 2021-01-06T16:24:34.000Z | 2021-01-06T16:24:34.000Z | import numpy as np
import logging
import multiprocessing as mp
import os
import sys
import time
import matplotlib.pyplot as plt
import argparse
from functools import partial
import logging
from mskernel import hsic
from mskernel import kernel
from mskernel import util
from mskernel.featsel import MultiSel, PolySel
### Problems
from problem import *
# NOTE(review): presumably resets CPU affinity so the worker pool can use all
# cores (some numeric libraries pin the process to one core) — confirm.
os.system("taskset -p 0xff %d" % os.getpid())
# Sample sizes evaluated in every experiment run.
nsamples_lin = [500, 1000, 1500, 2000]
def one_trial(i, n_samples, algsel, problem, n_select, hsic_e, params):
    """Run a single feature-selection trial and return its (TPR, FPR).

    :param i: trial index, used as the sampling/selection seed.
    :param n_samples: number of samples drawn from the problem.
    :param algsel: selection algorithm class (MultiSel or PolySel).
    :param problem: problem instance providing sample() and is_true().
    :param n_select: number of features to select.
    :param hsic_e: HSIC estimator class.
    :param params: callable mapping n_samples to the estimator parameter.
    """
    p,r = problem.sample(n_samples, i)
    # Gaussian kernel bandwidths via the median-distance heuristic (squared).
    p_bw = util.meddistance(p, subsample=1000) **2
    r_bw = util.meddistance(r, subsample=1000) **2
    hsic_e = hsic_e(kernel.KGauss(p_bw),kernel.KGauss(r_bw), params(n_samples))
    feat_select = algsel(hsic_e, params=True)
    results = feat_select.test(p,r, args=n_select, seed=i)
    ## True selected features.
    if results['sel_vars'].shape[0] > 1:
        true = problem.is_true(results['sel_vars'])
        n_true = np.sum(true)
        # Rates are normalized by the count of false/true selections;
        # max(..., 1) guards against division by zero.
        fpr = np.sum(results['h0_rejs'][np.logical_not(true)])/max(n_select-n_true,1)
        tpr = np.sum(results['h0_rejs'][true])/max(n_true,1)
    else:
        tpr, fpr = 0, 0
    logging.debug("TPR is :{0:.3f} FPR is :{1:.3f}".format(tpr, fpr))
    return tpr, fpr
def independent_job(problem, n_samples, n_select, algsel, hsic_e, pool, n_repeats,
        params):
    """Run n_repeats trials in parallel and aggregate TPR/FPR statistics.

    Returns two dicts {'mean': ..., 'sd': ...}.  Note 'sd' is computed as
    sqrt(var / n_repeats), i.e. the standard error of the mean, not the
    sample standard deviation.
    """
    result = pool.map_async(partial(one_trial,
        n_samples=n_samples,
        algsel=algsel,
        problem=problem,
        n_select=n_select,
        hsic_e=hsic_e,
        params=params,
        ),
        [i for i in range(n_repeats)])
    res = result.get()
    tpr, fpr = zip(*res)
    tpr = { 'mean': np.sum(tpr)/n_repeats,
            'sd' : np.sqrt(np.var(tpr)/n_repeats) }
    fpr = { 'mean': np.sum(fpr)/n_repeats,
            'sd' : np.sqrt(np.var(fpr)/n_repeats)}
    logging.debug("Overall TPR is :{0:.3f} ".format(tpr['mean']) + " " + \
        "Its variance is :{0:.3f} ".format(tpr['sd']))
    logging.debug("Overall FPR is :{0:.3f} ".format(fpr['mean']) + " " + \
        "Its variance is :{0:.3f} ".format(fpr['sd']))
    return tpr, fpr
def params_block(n):
    """Block size for the block HSIC estimator.

    Pinned to a constant for these runs; the sample size *n* is ignored.
    (A floor(sqrt(n)) scaling was used previously.)
    """
    return 5
def params_inc(n):
    """Step size for the incomplete HSIC estimator (constant; *n* is ignored)."""
    return 1
def runExperiments(problem, n_select, n_dim, algorithm, dist, n_repeats, threads):
    """Run the feature-selection experiment over all sample sizes and save results.

    :param problem: problem name; must contain 'Logit'.
    :param n_select: number of features to select (must not exceed n_dim).
    :param n_dim: dimensionality of the problem.
    :param algorithm: 'MultiSel' or 'PolySel'.
    :param dist: HSIC estimator name; must contain 'Inc' or 'Block'.
    :param n_repeats: number of independent trials per sample size.
    :param threads: worker processes in the multiprocessing pool.
    :raises ValueError: on any unrecognized or inconsistent argument.
    """
    # Validate with explicit exceptions instead of `assert`, which is
    # silently stripped when Python runs with -O.
    if n_select > n_dim:
        raise ValueError("n_select ({0}) cannot exceed n_dim ({1})".format(n_select, n_dim))

    if algorithm == 'MultiSel':
        algsel = MultiSel
    elif algorithm == 'PolySel':
        algsel = PolySel
    else:
        raise ValueError("Unknown algorithm: {0}".format(algorithm))

    if 'Logit' in problem:
        n_true = 10
        sampler = Logit(n_true, n_dim)
    else:
        raise ValueError("Unknown problem: {0}".format(problem))

    if 'Inc' in dist:
        hsic_e = hsic.HSIC_Inc
        params = params_inc
    elif 'Block' in dist:
        hsic_e = hsic.HSIC_Block
        params = params_block
    else:
        raise ValueError("Unknown HSIC estimator: {0}".format(dist))

    ## Distributed trials: one independent_job per sample size.
    with mp.Pool(threads) as pool:
        results = []
        for n_samples in nsamples_lin:
            results.append(independent_job(sampler,
                n_samples,
                n_select,
                algsel,
                hsic_e,
                pool,
                n_repeats,
                params))
    tpr, fpr = zip(*results)

    setting = {'dim': n_dim,
               'nsample_lin': nsamples_lin,
               'problem': problem,
               'algorithm': algorithm,
               'HSIC': dist,
               'n_select': n_select}
    results = {'setting': setting,
               'x_lin': nsamples_lin,
               'fpr': fpr,
               'tpr': tpr}
    logging.info("Setting : {0}".format(setting))
    logging.info("Results : {0}".format(results))
    # Persist everything needed to regenerate the plots.
    np.save("temp/Ex4_{0}_{1}_{2}_{3}_{4}_{5}".format(n_repeats,
                                                      problem,
                                                      dist,
                                                      algorithm,
                                                      n_select,
                                                      n_dim),
            results)
# CLI entry point: parse experiment settings, configure log verbosity, run.
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('-p','--problem',
            default = 'Logit',
            type = str,
            help = 'Problem setting: MS, VS.')
    parser.add_argument('-s','--n_select',
            default = 30,
            type = int,
            help = 'Number of features to select')
    parser.add_argument('-d','--n_dim',
            default = 50,
            type = int,
            help = 'Number of dimension of the problem')
    parser.add_argument('-a','--algorithm',
            default = 'PolySel',
            type = str,
            help = 'Algorithm to use: PolySel, MultiSel.')
    parser.add_argument('-r','--n_repeats',
            default = 100,
            type = int,
            help = 'Number of trials or "r"epeats.')
    parser.add_argument('-t','--threads',
            default = 5,
            type = int,
            help = 'Number of threads.')
    parser.add_argument('-e','--estimator',
            default = 'Block',
            type = str,
            help = 'MMD Estimator: Inc, Block')
    parser.add_argument('-v','--verbose',
            default = 0,
            type = int,
            help = 'Verbose level: 0, 1, 2.')
    args = parser.parse_args()
    # Map the -v level onto the root logger threshold.
    if args.verbose == 0:
        logging.getLogger().setLevel(logging.WARNING)
    elif args.verbose == 1:
        logging.getLogger().setLevel(logging.INFO)
    elif args.verbose == 2:
        logging.getLogger().setLevel(logging.DEBUG)
    runExperiments(
            args.problem,
            args.n_select,
            args.n_dim,
            args.algorithm,
            args.estimator,
            args.n_repeats,
            args.threads)
| 27.10628 | 85 | 0.553377 |
acf0bdc3003eed70e67c4f9020ce995c866f3ecc | 4,646 | py | Python | aristaeus/aristaeus/api/v1/views/swarms.py | Yo-main/akingbee.com | 144940df99900226073eb4bf721a6ab407a3911d | [
"MIT"
] | null | null | null | aristaeus/aristaeus/api/v1/views/swarms.py | Yo-main/akingbee.com | 144940df99900226073eb4bf721a6ab407a3911d | [
"MIT"
] | 20 | 2019-10-06T20:24:49.000Z | 2022-02-28T01:55:49.000Z | aristaeus/aristaeus/api/v1/views/swarms.py | yo-main/akingbee | a8b4b307e2262f98eb93459c6dd4207e707cee1e | [
"MIT"
] | null | null | null | """API endpoints for hive"""
import datetime
from typing import List
import uuid
from fastapi import APIRouter, Depends, Cookie, HTTPException
from sqlalchemy.orm import Session
from gaea.log import logger
from gaea.models import Swarms, SwarmHealthStatuses
from gaea.webapp.utils import get_session
from aristaeus.helpers.common import validate_uuid
from gaea.helpers.auth import get_logged_in_user
from aristaeus.models import SwarmModel, SwarmPostModel, SwarmPutModel
router = APIRouter()
@router.get("/swarms", status_code=200, response_model=List[SwarmModel])
async def get_swarms(
    access_token: str = Cookie(None), session: Session = Depends(get_session)
):
    """
    List every swarm owned by the authenticated user, skipping soft-deleted ones.
    """
    user_id = await get_logged_in_user(access_token)
    query = session.query(Swarms).filter(
        Swarms.user_id == user_id,
        Swarms.deleted_at.is_(None),
    )
    swarms = query.all()
    logger.info("Get list of swarms successfull", user_id=user_id, nb_swarms=len(swarms))
    return swarms
@router.post("/swarm", status_code=200, response_model=SwarmModel)
async def post_swarm(
    data: SwarmPostModel,
    access_token: str = Cookie(None),
    session: Session = Depends(get_session),
):
    """
    Create a Swarm owned by the authenticated user and return it as JSON.
    """
    user_id = await get_logged_in_user(access_token)
    logger.info("Post swarm received", user_id=user_id, payload=data)
    swarm = Swarms(
        health_status_id=data.health_status_id,
        queen_year=data.queen_year,
        user_id=user_id,
    )
    session.add(swarm)
    try:
        session.commit()
    except Exception as exc:
        # NOTE(review): the session is not rolled back here — confirm the
        # get_session dependency handles cleanup on a failed commit.
        logger.exception("Database error", swarm=swarm)
        raise HTTPException(
            status_code=400, detail="Couldn't save the swarm in database"
        ) from exc
    logger.info("Post swarm successfull", user_id=user_id, swarm_id=swarm.id)
    return swarm
@router.put("/swarm/{swarm_id}", status_code=204)
async def put_swarm(
    data: SwarmPutModel,
    swarm_id: uuid.UUID,
    access_token: str = Cookie(None),
    session: Session = Depends(get_session),
):
    """
    Modify a swarm's health status and/or queen year.
    """
    user_id = await get_logged_in_user(access_token)
    logger.info("Put swarm received", user_id=user_id, swarm_id=swarm_id)
    swarm = session.query(Swarms).get(swarm_id)
    # Missing, soft-deleted, and other users' swarms are all reported as 404
    # so that swarm existence is not leaked.
    if swarm is None or swarm.deleted_at or swarm.user_id != user_id:
        raise HTTPException(status_code=404, detail="Swarm not found")
    if not (data.health_status_id or data.queen_year):
        raise HTTPException(status_code=400, detail="No data provided")
    # Fields left unset (falsy) in the payload keep their current value.
    swarm.health_status_id = data.health_status_id or swarm.health_status_id
    swarm.queen_year = data.queen_year or swarm.queen_year
    try:
        session.commit()
    except Exception as exc:
        logger.exception("Database error", swarm=swarm)
        raise HTTPException(
            status_code=400, detail="Couldn't update the swarm in database"
        ) from exc
    logger.info("Put swarm successfull", user_id=user_id, swarm_id=swarm_id)
@router.delete("/swarm/{swarm_id}", status_code=204)
async def delete_swarm(
    swarm_id: uuid.UUID,
    access_token: str = Cookie(None),
    session: Session = Depends(get_session),
):
    """
    Soft-delete a swarm: stamp deleted_at and detach it from its hive.
    """
    user_id = await get_logged_in_user(access_token)
    logger.info("Delete swarm received", user_id=user_id, swarm_id=swarm_id)
    swarm = session.query(Swarms).get(swarm_id)
    if swarm is None or swarm.deleted_at:
        raise HTTPException(status_code=404, detail="Swarm not found")
    # NOTE(review): unlike GET/PUT, a foreign swarm yields 403 here, which
    # reveals that the swarm id exists — confirm this asymmetry is intended.
    if swarm.user_id != user_id:
        raise HTTPException(status_code=403)
    try:
        # Soft delete: keep the row, record the deletion time, release the hive.
        swarm.deleted_at = datetime.datetime.now()
        swarm.hive = None
        session.commit()
    except Exception as exc:
        logger.exception("Database error", swarm=swarm)
        raise HTTPException(
            status_code=400, detail="Couldn't delete the swarm in database"
        ) from exc
    logger.info("Delete swarm successfull", user_id=user_id, swarm_id=swarm_id)
@router.get("/swarm/{swarm_id}", status_code=200, response_model=SwarmModel)
async def get_swarm(
    swarm_id: uuid.UUID,
    access_token: str = Cookie(None),
    session: Session = Depends(get_session),
):
    """
    Fetch a single swarm owned by the authenticated user.
    """
    user_id = await get_logged_in_user(access_token)
    swarm = session.query(Swarms).get(swarm_id)
    # Missing, soft-deleted, and other users' swarms all look identical (404)
    # so the endpoint does not leak swarm existence.
    visible = (
        swarm is not None
        and not swarm.deleted_at
        and swarm.user_id == user_id
    )
    if not visible:
        raise HTTPException(status_code=404, detail="Swarm not found")
    logger.info("Get swarm successfull", user_id=user_id, swarm_id=swarm_id)
    return swarm
| 29.782051 | 80 | 0.696083 |
acf0bdc8e7f0b282e9916a951f7a4d1d08bea897 | 17,306 | py | Python | zerver/openapi/markdown_extension.py | luisogandob/zulip | f6667b8e80b4658b8d2a5ae6b3f0c9d6eac4a2d6 | [
"Apache-2.0"
] | 1 | 2020-08-27T20:05:42.000Z | 2020-08-27T20:05:42.000Z | zerver/openapi/markdown_extension.py | luisogandob/zulip | f6667b8e80b4658b8d2a5ae6b3f0c9d6eac4a2d6 | [
"Apache-2.0"
] | 1 | 2020-07-15T21:20:34.000Z | 2020-07-15T21:23:47.000Z | zerver/openapi/markdown_extension.py | luisogandob/zulip | f6667b8e80b4658b8d2a5ae6b3f0c9d6eac4a2d6 | [
"Apache-2.0"
] | 3 | 2020-07-06T22:58:27.000Z | 2020-07-10T15:51:46.000Z | import inspect
import json
import re
from typing import Any, Dict, List, Optional, Pattern, Tuple
import markdown
from django.conf import settings
from markdown.extensions import Extension
from markdown.preprocessors import Preprocessor
import zerver.openapi.python_examples
from zerver.openapi.openapi import get_openapi_description, get_openapi_fixture, openapi_spec
# Matches {generate_code_example(lang, opts)|function|key(argument)} macros.
MACRO_REGEXP = re.compile(
    r'\{generate_code_example(\(\s*(.+?)\s*\))*\|\s*(.+?)\s*\|\s*(.+?)\s*(\(\s*(.+)\s*\))?\}')
# Start/end markers delimiting example snippets in the source files.
PYTHON_EXAMPLE_REGEX = re.compile(r'\# \{code_example\|\s*(.+?)\s*\}')
JS_EXAMPLE_REGEX = re.compile(r'\/\/ \{code_example\|\s*(.+?)\s*\}')
# Matches {generate_api_description(...)} macros.
MACRO_REGEXP_DESC = re.compile(r'\{generate_api_description(\(\s*(.+?)\s*\))}')
# Client-setup boilerplate prepended to rendered Python examples.
PYTHON_CLIENT_CONFIG = """
#!/usr/bin/env python3
import zulip
# Pass the path to your zuliprc file here.
client = zulip.Client(config_file="~/zuliprc")
"""
PYTHON_CLIENT_ADMIN_CONFIG = """
#!/usr/bin/env python
import zulip
# The user for this zuliprc file must be an organization administrator
client = zulip.Client(config_file="~/zuliprc-admin")
"""
# Client-setup boilerplate prepended to rendered JavaScript examples.
JS_CLIENT_CONFIG = """
const Zulip = require('zulip-js');
// Pass the path to your zuliprc file here.
const config = { zuliprc: 'zuliprc' };
"""
JS_CLIENT_ADMIN_CONFIG = """
const Zulip = require('zulip-js');
// The user for this zuliprc file must be an organization administrator.
const config = { zuliprc: 'zuliprc-admin' };
"""
# Placeholder credentials shown in generated curl examples.
DEFAULT_AUTH_EMAIL = "BOT_EMAIL_ADDRESS"
DEFAULT_AUTH_API_KEY = "BOT_API_KEY"
# Fallback example values by OpenAPI schema type when no example is given.
DEFAULT_EXAMPLE = {
    "integer": 1,
    "string": "demo",
    "boolean": False,
}
def parse_language_and_options(input_str: Optional[str]) -> Tuple[str, Dict[str, Any]]:
    """Split a macro argument like "curl, exclude=['email']" into a
    (language, options) pair; options values are parsed as JSON after
    normalizing single quotes."""
    if not input_str:
        return ("", {})
    match = re.match(r"(?P<language>\w+)(,\s*(?P<options>[\"\'\w\d\[\],= ]+))?", input_str)
    assert match is not None
    language = match.group("language")
    assert language is not None
    raw_options = match.group("options")
    if not raw_options:
        return (language, {})
    option_pattern = re.compile(r"(?P<key>\w+)\s*=\s*(?P<value>[\'\"\w\d]+|\[[\'\",\w\d ]+\])")
    options = {
        m.group("key"): json.loads(m.group("value").replace("'", '"'))
        for m in option_pattern.finditer(raw_options)
    }
    return (language, options)
def extract_code_example(source: List[str], snippet: List[Any],
                         example_regex: Pattern[str]) -> List[Any]:
    """Recursively collect the runs of lines delimited by
    {code_example|start} / {code_example|end} markers in *source*,
    appending each run (as a list of lines) to *snippet*."""
    begin_idx = -1
    finish_idx = -1
    for current_line in source:
        marker = example_regex.search(current_line)
        if marker is None:
            continue
        if marker.group(1) == 'start':
            begin_idx = source.index(current_line)
        elif marker.group(1) == 'end':
            finish_idx = source.index(current_line)
            break
    if begin_idx == -1 and finish_idx == -1:
        # No more marker pairs: done.
        return snippet
    snippet.append(source[begin_idx + 1:finish_idx])
    # Continue scanning after the end marker for further snippets.
    return extract_code_example(source[finish_idx + 1:], snippet, example_regex)
def render_python_code_example(function: str, admin_config: bool=False,
                               **kwargs: Any) -> List[str]:
    """Render the Python code example for *function* as markdown lines.

    The example body is extracted from the registered test function's source
    between the {code_example|start}/{code_example|end} markers, prefixed
    with the client-setup boilerplate (admin variant when admin_config=True).
    """
    method = zerver.openapi.python_examples.TEST_FUNCTIONS[function]
    function_source_lines = inspect.getsourcelines(method)[0]
    if admin_config:
        config = PYTHON_CLIENT_ADMIN_CONFIG.splitlines()
    else:
        config = PYTHON_CLIENT_CONFIG.splitlines()
    snippets = extract_code_example(function_source_lines, [], PYTHON_EXAMPLE_REGEX)
    code_example = []
    code_example.append('```python')
    code_example.extend(config)
    for snippet in snippets:
        for line in snippet:
            # Remove one level of indentation and strip newlines
            code_example.append(line[4:].rstrip())
        code_example.append('print(result)')
        code_example.append('\n')
    code_example.append('```')
    return code_example
def render_javascript_code_example(function: str, admin_config: bool=False,
                                   **kwargs: Any) -> List[str]:
    """Render the JavaScript code example for *function* as markdown lines.

    The example body is scraped from zerver/openapi/javascript_examples.js:
    lines between ``add_example(`` (matching *function*) and the closing
    ``}`` are collected, then the {code_example|...}-delimited snippets are
    extracted and wrapped in client boilerplate.
    """
    function_source_lines = []
    with open('zerver/openapi/javascript_examples.js') as f:
        parsing = False
        for line in f:
            if line.startswith("}"):
                parsing = False
            if parsing:
                function_source_lines.append(line.rstrip())
            if line.startswith("add_example(") and function in line:
                parsing = True
    snippets = extract_code_example(function_source_lines, [], JS_EXAMPLE_REGEX)
    if admin_config:
        config = JS_CLIENT_ADMIN_CONFIG.splitlines()
    else:
        config = JS_CLIENT_CONFIG.splitlines()
    code_example = []
    code_example.append('```js')
    code_example.extend(config)
    for snippet in snippets:
        code_example.append("Zulip(config).then(async (client) => {")
        for line in snippet:
            # Rewrite `const result = ...;` assignments into `return ...;`
            # so the promise chain below prints the value.
            result = re.search('const result.*=(.*);', line)
            if result:
                line = f"    return{result.group(1)};"
            # Strip newlines
            code_example.append(line.rstrip())
        code_example.append("}).then(console.log).catch(console.err);")
        code_example.append(" ")
    code_example.append('```')
    return code_example
def curl_method_arguments(endpoint: str, method: str,
                          api_url: str) -> List[str]:
    """Return the curl argument list (including -sS verbosity flags) for the
    given endpoint/HTTP method, raising ValueError for unsupported methods."""
    valid_methods = ["GET", "POST", "DELETE", "PUT", "PATCH", "OPTIONS"]
    normalized = method.upper()
    url = f"{api_url}/v1{endpoint}"
    if normalized == "GET":
        # GET parameters must become URL query parameters (-G), not a POST body.
        # TODO: remove the -X part by updating the linting rule. It's redundant.
        return ["-sSX", "GET", "-G", url]
    if normalized in valid_methods:
        return ["-sSX", normalized, url]
    raise ValueError(f"The request method {normalized} is not one of {valid_methods}")
def get_openapi_param_example_value_as_string(endpoint: str, method: str, param: Dict[str, Any],
curl_argument: bool=False) -> str:
jsonify = False
param_name = param["name"]
if "content" in param:
param = param["content"]["application/json"]
jsonify = True
if "type" in param["schema"]:
param_type = param["schema"]["type"]
else:
# Hack: Ideally, we'd extract a common function for handling
# oneOf values in types and do something with the resulting
# union type. But for this logic's purpose, it's good enough
# to just check the first parameter.
param_type = param["schema"]["oneOf"][0]["type"]
if param_type in ["object", "array"]:
example_value = param.get("example", None)
if not example_value:
msg = f"""All array and object type request parameters must have
concrete examples. The openAPI documentation for {endpoint}/{method} is missing an example
value for the {param_name} parameter. Without this we cannot automatically generate a
cURL example."""
raise ValueError(msg)
ordered_ex_val_str = json.dumps(example_value, sort_keys=True)
# We currently don't have any non-JSON encoded arrays.
assert(jsonify)
if curl_argument:
return f" --data-urlencode {param_name}='{ordered_ex_val_str}'"
return ordered_ex_val_str # nocoverage
else:
example_value = param.get("example", DEFAULT_EXAMPLE[param_type])
if isinstance(example_value, bool):
example_value = str(example_value).lower()
if jsonify:
example_value = json.dumps(example_value)
if curl_argument:
return f" -d '{param_name}={example_value}'"
return example_value
def generate_curl_example(endpoint: str, method: str,
                          api_url: str,
                          auth_email: str=DEFAULT_AUTH_EMAIL,
                          auth_api_key: str=DEFAULT_AUTH_API_KEY,
                          exclude: Optional[List[str]]=None,
                          include: Optional[List[str]]=None) -> List[str]:
    """Build a fenced-markdown curl example for an OpenAPI operation.

    Path parameters are substituted into the URL, non-path parameters become
    -d/--data-urlencode arguments (filtered by *include*/*exclude*, which are
    mutually exclusive), and basic-auth credentials are added unless the
    operation is explicitly listed as insecure.
    """
    if exclude is not None and include is not None:
        raise AssertionError("exclude and include cannot be set at the same time.")
    lines = ["```curl"]
    operation = endpoint + ":" + method.lower()
    operation_entry = openapi_spec.spec()['paths'][endpoint][method.lower()]
    global_security = openapi_spec.spec()['security']
    operation_params = operation_entry.get("parameters", [])
    operation_request_body = operation_entry.get("requestBody", None)
    operation_security = operation_entry.get("security", None)
    # During the curl test suite, example values are patched so the examples
    # are actually runnable against a test server.
    if settings.RUNNING_OPENAPI_CURL_TEST:  # nocoverage
        from zerver.openapi.curl_param_value_generators import patch_openapi_example_values
        operation_params, operation_request_body = patch_openapi_example_values(operation, operation_params,
                                                                                operation_request_body)
    # Substitute path parameters (e.g. {message_id}) into the endpoint URL.
    format_dict = {}
    for param in operation_params:
        if param["in"] != "path":
            continue
        example_value = get_openapi_param_example_value_as_string(endpoint, method, param)
        format_dict[param["name"]] = example_value
    example_endpoint = endpoint.format_map(format_dict)
    curl_first_line_parts = ["curl"] + curl_method_arguments(example_endpoint, method,
                                                             api_url)
    lines.append(" ".join(curl_first_line_parts))
    insecure_operations = ['/dev_fetch_api_key:post']
    if operation_security is None:
        if global_security == [{'basicAuth': []}]:
            authentication_required = True
        else:
            raise AssertionError("Unhandled global securityScheme."
                                 + " Please update the code to handle this scheme.")
    elif operation_security == []:
        if operation in insecure_operations:
            authentication_required = False
        else:
            raise AssertionError("Unknown operation without a securityScheme. "
                                 + "Please update insecure_operations.")
    else:
        raise AssertionError("Unhandled securityScheme. Please update the code to handle this scheme.")
    if authentication_required:
        lines.append(f"    -u {auth_email}:{auth_api_key}")
    # Non-path parameters become request-data arguments.
    for param in operation_params:
        if param["in"] == "path":
            continue
        param_name = param["name"]
        if include is not None and param_name not in include:
            continue
        if exclude is not None and param_name in exclude:
            continue
        example_value = get_openapi_param_example_value_as_string(endpoint, method, param,
                                                                  curl_argument=True)
        lines.append(example_value)
    # File-upload style bodies are rendered as -F form fields.
    if "requestBody" in operation_entry:
        properties = operation_entry["requestBody"]["content"]["multipart/form-data"]["schema"]["properties"]
        for key, property in properties.items():
            lines.append('    -F "{}=@{}"'.format(key, property["example"]))
    # Join the multi-line command with shell continuations (skip fences).
    for i in range(1, len(lines)-1):
        lines[i] = lines[i] + " \\"
    lines.append("```")
    return lines
def render_curl_example(function: str, api_url: str,
                        exclude: Optional[List[str]]=None,
                        include: Optional[List[str]]=None) -> List[str]:
    """A simple wrapper around generate_curl_example.

    *function* is "endpoint:method" with optional ":auth_email:auth_api_key"
    suffixes overriding the placeholder credentials.
    """
    endpoint, method, *auth = function.split(":")
    kwargs: Dict[str, Any] = {
        "api_url": api_url,
        "exclude": exclude,
        "include": include,
    }
    if len(auth) > 0:
        kwargs["auth_email"] = auth[0]
    if len(auth) > 1:
        kwargs["auth_api_key"] = auth[1]
    return generate_curl_example(endpoint, method, **kwargs)
# Maps each supported language to its renderer and, where applicable, the
# client-configuration boilerplate used by the rendered examples.
SUPPORTED_LANGUAGES: Dict[str, Any] = {
    'python': {
        'client_config': PYTHON_CLIENT_CONFIG,
        'admin_config': PYTHON_CLIENT_ADMIN_CONFIG,
        'render': render_python_code_example,
    },
    'curl': {
        'render': render_curl_example,
    },
    'javascript': {
        'client_config': JS_CLIENT_CONFIG,
        'admin_config': JS_CLIENT_ADMIN_CONFIG,
        'render': render_javascript_code_example,
    },
}
class APIMarkdownExtension(Extension):
    """Markdown extension registering the code-example and API-description
    macro preprocessors, configured with the API URL used in curl examples."""

    def __init__(self, api_url: Optional[str]) -> None:
        self.config = {
            'api_url': [
                api_url,
                'API URL to use when rendering curl examples',
            ],
        }

    def extendMarkdown(self, md: markdown.Markdown, md_globals: Dict[str, Any]) -> None:
        """Install both preprocessors at the '_begin' priority slot."""
        md.preprocessors.add(
            'generate_code_example', APICodeExamplesPreprocessor(md, self.getConfigs()), '_begin',
        )
        md.preprocessors.add(
            'generate_api_description', APIDescriptionPreprocessor(md, self.getConfigs()), '_begin',
        )
class APICodeExamplesPreprocessor(Preprocessor):
    """Expands {generate_code_example...} macros into rendered code examples
    or JSON fixtures, restarting the scan after every substitution."""

    def __init__(self, md: markdown.Markdown, config: Dict[str, Any]) -> None:
        super().__init__(md)
        self.api_url = config['api_url']

    def run(self, lines: List[str]) -> List[str]:
        # Re-scan from the top after each replacement so newly inserted
        # lines are also processed; the for/else exits once no macro is left.
        done = False
        while not done:
            for line in lines:
                loc = lines.index(line)
                match = MACRO_REGEXP.search(line)
                if match:
                    language, options = parse_language_and_options(match.group(2))
                    function = match.group(3)
                    key = match.group(4)
                    argument = match.group(6)
                    if self.api_url is None:
                        raise AssertionError("Cannot render curl API examples without API URL set.")
                    options['api_url'] = self.api_url
                    # NOTE(review): `text` is only assigned on the
                    # 'fixture'-with-argument and 'example' branches; any
                    # other key would hit an UnboundLocalError below — confirm
                    # the macro grammar guarantees one of those keys.
                    if key == 'fixture':
                        if argument:
                            text = self.render_fixture(function, name=argument)
                    elif key == 'example':
                        if argument == 'admin_config=True':
                            text = SUPPORTED_LANGUAGES[language]['render'](function, admin_config=True)
                        else:
                            text = SUPPORTED_LANGUAGES[language]['render'](function, **options)
                    # The line that contains the directive to include the macro
                    # may be preceded or followed by text or tags, in that case
                    # we need to make sure that any preceding or following text
                    # stays the same.
                    line_split = MACRO_REGEXP.split(line, maxsplit=0)
                    preceding = line_split[0]
                    following = line_split[-1]
                    text = [preceding] + text + [following]
                    lines = lines[:loc] + text + lines[loc+1:]
                    break
            else:
                done = True
        return lines

    def render_fixture(self, function: str, name: Optional[str]=None) -> List[str]:
        """Render the JSON fixture for "endpoint:method" as fenced markdown."""
        fixture = []
        path, method = function.rsplit(':', 1)
        fixture_dict = get_openapi_fixture(path, method, name)
        fixture_json = json.dumps(fixture_dict, indent=4, sort_keys=True,
                                  separators=(',', ': '))
        fixture.append('``` json')
        fixture.extend(fixture_json.splitlines())
        fixture.append('```')
        return fixture
class APIDescriptionPreprocessor(Preprocessor):
    """Markdown preprocessor that expands API-description macro directives.

    Replaces each line matching MACRO_REGEXP_DESC with the OpenAPI
    description of the referenced endpoint, substituting the configured API
    URL for the ``{{api_url}}`` placeholder and preserving any surrounding
    text on the macro line.
    """

    def __init__(self, md: markdown.Markdown, config: Dict[str, Any]) -> None:
        super().__init__(md)
        # API URL substituted for {{api_url}} in rendered descriptions.
        self.api_url = config['api_url']

    def run(self, lines: List[str]) -> List[str]:
        done = False
        while not done:
            # enumerate() yields the true index directly; lines.index(line)
            # was O(n) per line and could mis-locate duplicated lines.
            for loc, line in enumerate(lines):
                match = MACRO_REGEXP_DESC.search(line)
                if match:
                    function = match.group(2)
                    text = self.render_description(function)
                    # The line that contains the directive to include the macro
                    # may be preceded or followed by text or tags, in that case
                    # we need to make sure that any preceding or following text
                    # stays the same.
                    line_split = MACRO_REGEXP_DESC.split(line, maxsplit=0)
                    preceding = line_split[0]
                    following = line_split[-1]
                    text = [preceding] + text + [following]
                    lines = lines[:loc] + text + lines[loc+1:]
                    break
            else:
                done = True
        return lines

    def render_description(self, function: str) -> List[str]:
        """Return the endpoint description as a list of markdown lines."""
        description: List[str] = []
        path, method = function.rsplit(':', 1)
        description_dict = get_openapi_description(path, method)
        description_dict = description_dict.replace('{{api_url}}', self.api_url)
        description.extend(description_dict.splitlines())
        return description
def makeExtension(*args: Any, **kwargs: str) -> APIMarkdownExtension:
    # Standard Python-Markdown entry point: the markdown library calls this
    # factory when the extension is referenced by module name.
    return APIMarkdownExtension(*args, **kwargs)
| 38.54343 | 109 | 0.600023 |
acf0bf0011325698a763ae298640e338ec69449c | 8,052 | py | Python | invenio_communities/communities/resources/resource.py | lhenze/invenio-communities | 471abcf6b4429306ab39cc0c334cd78911a2dfb2 | [
"MIT"
] | null | null | null | invenio_communities/communities/resources/resource.py | lhenze/invenio-communities | 471abcf6b4429306ab39cc0c334cd78911a2dfb2 | [
"MIT"
] | null | null | null | invenio_communities/communities/resources/resource.py | lhenze/invenio-communities | 471abcf6b4429306ab39cc0c334cd78911a2dfb2 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2016-2021 CERN.
# Copyright (C) 2021 Northwestern University.
# Copyright (C) 2022 Graz University of Technology.
#
# Invenio is free software; you can redistribute it and/or modify it
# under the terms of the MIT License; see LICENSE file for more details.
"""Invenio Communities Resource API."""
from flask import g
from flask_resources import (
from_conf,
request_parser,
resource_requestctx,
response_handler,
route,
)
from invenio_records_resources.resources.files.resource import request_stream
from invenio_records_resources.resources.records.resource import (
RecordResource,
request_data,
request_headers,
request_search_args,
request_view_args,
)
from invenio_records_resources.resources.records.utils import es_preference
# Query-string ("args") parser for the community-requests search endpoint;
# the accepted arguments are declared in the resource config under
# "request_community_requests_search_args".
request_community_requests_search_args = request_parser(
    from_conf("request_community_requests_search_args"), location="args"
)
class CommunityResource(RecordResource):
    """Communities resource.

    Flask-resources REST layer for communities: CRUD, the per-user community
    listing, logo management, featured-community entries and the community
    requests search.  Every handler is a thin delegation to the matching
    ``self.service`` method, acting as ``g.identity``.

    NOTE(review): the decorator stacking on each handler (request parsing
    above ``response_handler``) appears order-sensitive in flask-resources —
    confirm before reordering.
    """

    def create_url_rules(self):
        """Create the URL rules for the record resource."""

        def p(prefix, route):
            """Prefix a route with the URL prefix."""
            return f"{prefix}{route}"

        # Route templates come from the resource config; they are combined
        # with either the communities or the user prefix below.
        routes = self.config.routes
        return [
            route("GET", p(routes["communities-prefix"], routes["list"]), self.search),
            route("POST", p(routes["communities-prefix"], routes["list"]), self.create),
            route("GET", p(routes["communities-prefix"], routes["item"]), self.read),
            route("PUT", p(routes["communities-prefix"], routes["item"]), self.update),
            route(
                "DELETE", p(routes["communities-prefix"], routes["item"]), self.delete
            ),
            route(
                "GET",
                p(routes["user-prefix"], routes["list"]),
                self.search_user_communities,
            ),
            route(
                "POST",
                p(routes["communities-prefix"], routes["item"]) + "/rename",
                self.rename,
            ),
            route(
                "GET",
                p(routes["communities-prefix"], routes["item"]) + "/logo",
                self.read_logo,
            ),
            route(
                "PUT",
                p(routes["communities-prefix"], routes["item"]) + "/logo",
                self.update_logo,
            ),
            route(
                "DELETE",
                p(routes["communities-prefix"], routes["item"]) + "/logo",
                self.delete_logo,
            ),
            route(
                "GET",
                p(routes["communities-prefix"], routes["featured-prefix"]),
                self.featured_communities_search,
            ),
            route(
                "GET",
                p(routes["communities-prefix"], routes["item"])
                + routes["featured-prefix"],
                self.featured_list,
            ),
            route(
                "POST",
                p(routes["communities-prefix"], routes["item"])
                + routes["featured-prefix"],
                self.featured_create,
            ),
            route(
                "PUT",
                p(routes["communities-prefix"], routes["item"])
                + p(routes["featured-prefix"], routes["featured-id"]),
                self.featured_update,
            ),
            route(
                "DELETE",
                p(routes["communities-prefix"], routes["item"])
                + p(routes["featured-prefix"], routes["featured-id"]),
                self.featured_delete,
            ),
            route(
                "GET",
                p(
                    routes["communities-prefix"],
                    routes["item"] + routes["community-requests"],
                ),
                self.search_community_requests,
            ),
        ]

    @request_search_args
    @response_handler(many=True)
    def search_user_communities(self):
        """Perform a search over the user's communities.

        GET /user/communities
        """
        hits = self.service.search_user_communities(
            identity=g.identity,
            params=resource_requestctx.args,
            es_preference=es_preference(),
        )
        return hits.to_dict(), 200

    @request_view_args
    @request_community_requests_search_args
    @response_handler(many=True)
    def search_community_requests(self):
        """Perform a search over the community's requests.

        GET /communities/<pid_value>/requests
        """
        hits = self.service.search_community_requests(
            identity=g.identity,
            community_id=resource_requestctx.view_args["pid_value"],
            params=resource_requestctx.args,
            es_preference=es_preference(),
        )
        return hits.to_dict(), 200

    @request_headers
    @request_view_args
    @request_data
    @response_handler()
    def rename(self):
        """Rename a community.

        Uses the If-Match header (when given) for optimistic concurrency.
        """
        item = self.service.rename(
            g.identity,
            resource_requestctx.view_args["pid_value"],
            resource_requestctx.data,
            revision_id=resource_requestctx.headers.get("if_match"),
        )
        return item.to_dict(), 200

    @request_view_args
    def read_logo(self):
        """Read logo's content."""
        item = self.service.read_logo(
            g.identity,
            resource_requestctx.view_args["pid_value"],
        )
        # Streams the file back rather than returning JSON.
        return item.send_file(), 200

    @request_view_args
    @request_stream
    @response_handler()
    def update_logo(self):
        """Upload logo content from the raw request body stream."""
        item = self.service.update_logo(
            g.identity,
            resource_requestctx.view_args["pid_value"],
            resource_requestctx.data["request_stream"],
            content_length=resource_requestctx.data["request_content_length"],
        )
        return item.to_dict(), 200

    @request_view_args
    def delete_logo(self):
        """Delete logo."""
        self.service.delete_logo(
            g.identity,
            resource_requestctx.view_args["pid_value"],
        )
        return "", 204

    @request_search_args
    @response_handler(many=True)
    def featured_communities_search(self):
        """Features communities search."""
        hits = self.service.featured_search(
            identity=g.identity,
            params=resource_requestctx.args,
            es_preference=es_preference(),
        )
        return hits.to_dict(), 200

    @request_headers
    @request_view_args
    @response_handler()
    def featured_list(self):
        """List featured entries for a community."""
        items = self.service.featured_list(
            g.identity,
            resource_requestctx.view_args["pid_value"],
        )
        return items.to_dict(), 200

    @request_headers
    @request_view_args
    @request_data
    @response_handler()
    def featured_create(self):
        """Create a featured community entry."""
        item = self.service.featured_create(
            g.identity,
            resource_requestctx.view_args["pid_value"],
            resource_requestctx.data,
        )
        return item.to_dict(), 201

    @request_headers
    @request_view_args
    @request_data
    @response_handler()
    def featured_update(self):
        """Update a featured community entry."""
        item = self.service.featured_update(
            g.identity,
            resource_requestctx.view_args["pid_value"],
            resource_requestctx.data,
            featured_id=resource_requestctx.view_args["featured_id"],
        )
        return item.to_dict(), 200

    @request_view_args
    def featured_delete(self):
        """Delete a featured community entry."""
        self.service.featured_delete(
            g.identity,
            resource_requestctx.view_args["pid_value"],
            featured_id=resource_requestctx.view_args["featured_id"],
        )
        return "", 204
| 31.952381 | 88 | 0.571038 |
acf0bf5582122da1adb412a736cadcb341f0eedc | 34,174 | py | Python | movo_common/movo_ros/src/movo_joint_interface/jaco_joint_controller.py | zkytony/kinova-movo | 37d7454b2dc589d44133f3913f567b9cc321a66d | [
"BSD-3-Clause"
] | 1 | 2021-06-24T19:20:01.000Z | 2021-06-24T19:20:01.000Z | movo_common/movo_ros/src/movo_joint_interface/jaco_joint_controller.py | ALAN-NUS/kinova_movo | 05a0451f5c563359ae0ffe3280e1df85caec9e55 | [
"BSD-3-Clause"
] | null | null | null | movo_common/movo_ros/src/movo_joint_interface/jaco_joint_controller.py | ALAN-NUS/kinova_movo | 05a0451f5c563359ae0ffe3280e1df85caec9e55 | [
"BSD-3-Clause"
] | 1 | 2020-01-21T11:05:24.000Z | 2020-01-21T11:05:24.000Z | """--------------------------------------------------------------------
Copyright (c) 2017, Kinova Robotics inc.
All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of the copyright holder nor the names of its contributors
may be used to endorse or promote products derived from this software
without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
\file jaco_joint_controller.py
\brief This module contains a collection of functions low level interface
to the Kinova API.
\Platform: Ubuntu 16.04 LTS / ROS Kinetic
--------------------------------------------------------------------"""
from ctypes import *
import rospy
from movo_msgs.msg import (
JacoCartesianVelocityCmd,
JacoAngularVelocityCmd6DOF,
JacoAngularVelocityCmd7DOF,
KinovaActuatorFdbk,
JacoStatus
)
from sensor_msgs.msg import JointState
from control_msgs.msg import JointTrajectoryControllerState
from std_msgs.msg import Float32, String
import threading
import math
from angles import *
from helpers import *
from jaco_joint_pid import JacoPID
from kinova_api_wrapper import *
import operator
class SIArmController(object):
    """Low-level controller for a Kinova JACO2 arm (6 or 7 DoF).

    Bridges ROS topics to the Kinova API: publishes joint/gripper state and
    force feedback, and tracks position/velocity commands, running a PID
    loop on a periodic timer.
    """

    # --- Control-mode constants ---
    # Traectory is the default mode - supports specifying angular or cartesian
    # position trajectories with optional soft velocity and acceleration
    # constraints. A PID controller in this class tracks the setpoints and
    # commands the requisite underlying angular velocities
    TRAJECTORY = 0
    # Angular velocity mode - angular velocities are passed directly through
    # to the lower layer API
    ANGULAR_VELOCITY = 1
    # Cartesian velocity mode - cartesian velocities are passed directly
    # through to the lower layer API
    # NOTE(review): the value 2 is skipped — presumably reserved to match the
    # lower-level API's numbering; confirm against KinovaAPI.
    CARTESIAN_VELOCITY = 3
    def __init__(self, prefix="", gripper="", interface='eth0', jaco_ip="10.66.171.15", dof=""):
        """
        Constructor

        prefix    : 'left' or 'right' — selects arm side, topic names and
                    API port numbers.
        gripper   : 'kg2', 'kg3' or 'rq85' (empty for no gripper).
        interface : network interface used to reach the arm.
        jaco_ip   : IP address of the arm controller.
        dof       : '6dof' or '7dof'.
        """
        # Setup a lock for accessing data in the control loop
        self._lock = threading.Lock()

        # Assume success until posted otherwise
        rospy.loginfo('Starting JACO2 control')
        self.init_success = True

        self._prefix = prefix
        self.iface = interface
        self.arm_dof = dof
        self.gripper = gripper

        # List of joint names
        if ("6dof" == self.arm_dof):
            self._joint_names = [self._prefix+'_shoulder_pan_joint',
                                 self._prefix+'_shoulder_lift_joint',
                                 self._prefix+'_elbow_joint',
                                 self._prefix+'_wrist_1_joint',
                                 self._prefix+'_wrist_2_joint',
                                 self._prefix+'_wrist_3_joint']
        elif ("7dof" == self.arm_dof):
            self._joint_names = [self._prefix + '_shoulder_pan_joint',
                                 self._prefix + '_shoulder_lift_joint',
                                 self._prefix + '_arm_half_joint',
                                 self._prefix + '_elbow_joint',
                                 self._prefix + '_wrist_spherical_1_joint',
                                 self._prefix + '_wrist_spherical_2_joint',
                                 self._prefix + '_wrist_3_joint']
        else:
            rospy.logerr("DoF needs to be set 6 or 7, cannot start SIArmController")
            return

        self._num_joints = len(self._joint_names)

        # Create the hooks for the API.  Each side gets its own UDP port set.
        if ('left' == prefix):
            self.api = KinovaAPI('left', self.iface, jaco_ip, '255.255.255.0', 24000, 24024, 44000, self.arm_dof)
        elif ('right' == prefix):
            self.api = KinovaAPI('right', self.iface, jaco_ip, '255.255.255.0', 25000, 25025, 55000, self.arm_dof)
        else:
            rospy.logerr("prefix needs to be set to left or right, cannot start the controller")
            return
        if not (self.api.init_success):
            self.Stop()
            return

        self.api.SetCartesianControl()
        self._position_hold = False
        self.estop = False

        # Initialize the joint feedback
        pos = self.api.get_angular_position()
        vel = self.api.get_angular_velocity()
        force = self.api.get_angular_force()
        self._joint_fb = dict()
        # The first _num_joints entries are arm joints; the remainder (if any)
        # are the gripper fingers.
        self._joint_fb['position'] = pos[:self._num_joints]
        self._joint_fb['velocity'] = vel[:self._num_joints]
        self._joint_fb['force'] = force[:self._num_joints]

        # NOTE(review): self.num_fingers is only assigned inside these
        # branches — if gripper is not kg2/rq85/kg3, later references to
        # self.num_fingers would raise AttributeError; confirm a gripper type
        # is always configured.
        if ("kg2" == gripper) or ("rq85" == gripper):
            self._gripper_joint_names = [self._prefix+'_gripper_finger1_joint',
                                         self._prefix+'_gripper_finger2_joint']
            self.num_fingers = 2
        elif ("kg3" == gripper):
            self._gripper_joint_names = [self._prefix+'_gripper_finger1_joint',
                                         self._prefix+'_gripper_finger2_joint',
                                         self._prefix+'_gripper_finger3_joint']
            self.num_fingers = 3

        if (0 != self.num_fingers):
            self._gripper_fb = dict()
            self._gripper_fb['position'] = pos[self._num_joints:self._num_joints+self.num_fingers]
            self._gripper_fb['velocity'] = vel[self._num_joints:self._num_joints+self.num_fingers]
            self._gripper_fb['force'] = force[self._num_joints:self._num_joints+self.num_fingers]

        """
        Reset gravity vector to [0.0 9.81 0.0], along with positive y axis of kinova_arm base
        """
        self.api.set_gravity_vector(0.0, 9.81, 0.0)

        """
        Register the publishers and subscribers
        """
        # Back-date the last-command stamps so stale-command logic starts cold.
        self.last_cartesian_vel_cmd_update = rospy.get_time()-0.5
        # X, Y, Z, ThetaX, ThetaY, ThetaZ, FingerVel
        self._last_cartesian_vel_cmd = [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
        self._cartesian_vel_cmd_sub = rospy.Subscriber(
            "/movo/%s_arm/cartesian_vel_cmd" % self._prefix,
            JacoCartesianVelocityCmd,
            self._update_cartesian_vel_cmd
        )

        self.last_angular_vel_cmd_update = rospy.get_time()-0.5
        self._last_angular_vel_cmd = [0.0] * (self._num_joints + self.num_fingers)
        if "6dof" == self.arm_dof:
            self._angular_vel_cmd_sub = rospy.Subscriber(
                "/movo/%s_arm/angular_vel_cmd" % self._prefix,
                JacoAngularVelocityCmd6DOF,
                self._update_angular_vel_cmd
            )
        elif "7dof" == self.arm_dof:
            self._angular_vel_cmd_sub = rospy.Subscriber(
                "/movo/%s_arm/angular_vel_cmd" % self._prefix,
                JacoAngularVelocityCmd7DOF,
                self._update_angular_vel_cmd
            )
        else:
            # Error condition
            rospy.logerr("DoF needs to be set 6 or 7, was {}".format(self.arm_dof))
            self.Stop()
            return

        self._gripper_vel_cmd = 0.0
        # Start in trajectory mode with the lower layer in angular control.
        self._ctl_mode = SIArmController.TRAJECTORY
        self.api.set_control_mode(KinovaAPI.ANGULAR_CONTROL)

        self._jstpub = rospy.Publisher("/movo/%s_arm_controller/state"%self._prefix, JointTrajectoryControllerState, queue_size=10)
        self._jstmsg = JointTrajectoryControllerState()
        self._jstmsg.header.seq = 0
        self._jstmsg.header.frame_id = ''
        self._jstmsg.header.stamp = rospy.get_rostime()
        self._jspub = rospy.Publisher("/movo/%s_arm/joint_states"%self._prefix, JointState, queue_size=10)
        self._jsmsg = JointState()
        self._jsmsg.header.seq = 0
        self._jsmsg.header.frame_id = ''
        self._jsmsg.header.stamp = rospy.get_rostime()
        self._jsmsg.name = self._joint_names

        self._actfdbk_pub = rospy.Publisher("/movo/%s_arm/actuator_feedback"%self._prefix, KinovaActuatorFdbk, queue_size=10)
        self._actfdbk_msg = KinovaActuatorFdbk()
        # NOTE(review): these three lines reset _jsmsg's header a second time —
        # presumably they were meant to initialize _actfdbk_msg.header; confirm.
        self._jsmsg.header.seq = 0
        self._jsmsg.header.frame_id = ''
        self._jsmsg.header.stamp = rospy.get_rostime()

        if (0 != self.num_fingers):
            self._gripper_vel_cmd_sub = rospy.Subscriber("/movo/%s_gripper/vel_cmd"%self._prefix, Float32, self._update_gripper_vel_cmd)
            # The rq85 gripper publishes its own joint states elsewhere.
            if ("rq85" != gripper):
                self._gripper_jspub = rospy.Publisher("/movo/%s_gripper/joint_states"%self._prefix, JointState, queue_size=10)
                self._gripper_jsmsg = JointState()
                self._gripper_jsmsg.header.seq = 0
                self._gripper_jsmsg.header.frame_id = ''
                self._gripper_jsmsg.header.stamp = rospy.get_rostime()
                self._gripper_jsmsg.name = self._gripper_joint_names

        self._cartesianforce_pub = rospy.Publisher("/movo/%s_arm/cartesianforce"%self._prefix, JacoCartesianVelocityCmd, queue_size=10)
        self._cartesianforce_msg = JacoCartesianVelocityCmd()
        self._cartesianforce_msg.header.seq = 0
        self._cartesianforce_msg.header.frame_id = ''
        self._cartesianforce_msg.header.stamp = rospy.get_rostime()

        self._angularforce_gravityfree_pub = rospy.Publisher("/movo/%s_arm/angularforce_gravityfree"%self._prefix, JacoStatus, queue_size=10)
        self._angularforce_gravityfree_msg = JacoStatus()
        self._angularforce_gravityfree_msg.header.seq = 0
        self._angularforce_gravityfree_msg.header.frame_id = ''
        self._angularforce_gravityfree_msg.header.stamp = rospy.get_rostime()
        self._angularforce_gravityfree_msg.type = "angularforce_gravityfree"

        """
        This starts the controller in cart vel mode so that teleop is active by default
        """
        if (0 != self.num_fingers):
            self._gripper_pid = [None]*self.num_fingers
            for i in range(self.num_fingers):
                self._gripper_pid[i] = JacoPID(5.0, 0.0, 0.8)
            self._gripper_vff = DifferentiateSignals(self.num_fingers, self._gripper_fb['position'])
            self._gripper_rate_limit = RateLimitSignals([FINGER_ANGULAR_VEL_LIMIT]*self.num_fingers, self.num_fingers, self._gripper_fb['position'])

        if ("6dof" == self.arm_dof):
            self._arm_rate_limit = RateLimitSignals(JOINT_6DOF_VEL_LIMITS, self._num_joints, self._joint_fb['position'])
        if ("7dof" == self.arm_dof):
            self._arm_rate_limit = RateLimitSignals(JOINT_7DOF_VEL_LIMITS, self._num_joints, self._joint_fb['position'])

        self._arm_vff_diff = DifferentiateSignals(self._num_joints, self._joint_fb['position'])

        self._pid = [None]*self._num_joints
        for i in range(self._num_joints):
            self._pid[i] = JacoPID(5.0, 0.0, 0.8)

        self.pause_controller = False

        self._init_ext_joint_position_control()
        self._init_ext_gripper_control()

        # Update the feedback once to get things initialized
        self._update_controller_data()

        # Start the controller
        rospy.loginfo("Starting the %s controller"%self._prefix)
        self._done = False
        self._t1 = rospy.Timer(rospy.Duration(0.01), self._run_ctl)
def _init_ext_joint_position_control(self):
"""
Initialize the PID controllers, command interface, data processing and
controller data for the arm
"""
for pid in self._pid:
pid.initialize()
self._pid_error = [0.0]*self._num_joints
self._pid_output = [0.0]*self._num_joints
self._arm_cmds = dict()
self._arm_cmds['position'] = self._joint_fb['position']
self._arm_cmds['velocity'] = [0.0]*self._num_joints
self._arm_cmds['acceleration'] = [0.0]*self._num_joints
self._arm_rate_limit.Reset(self._arm_cmds['position'])
self._arm_vff_diff.Reset(self._arm_cmds['position'])
def _init_ext_gripper_control(self):
"""
Initialize the PID controllers, command interface, data processing and
controller data for the gripper
"""
if (0 != self.num_fingers):
for pid in self._gripper_pid:
pid.initialize()
self._gripper_pid_error = [0.0]*self.num_fingers
self._gripper_pid_output = [0.0]*self.num_fingers
self._gripper_pos_cmds = self._gripper_fb['position']
self._gripper_vff.Reset(self._gripper_pos_cmds)
self._gripper_rate_limit.Reset(self._gripper_pos_cmds)
def _update_gripper_vel_cmd(self,cmd):
self._gripper_vel_cmd = cmd.data
def _update_cartesian_vel_cmd(self,cmds):
with self._lock:
self._last_cartesian_vel_cmd = [
cmds.x,
cmds.y,
cmds.z,
cmds.theta_x,
cmds.theta_y,
cmds.theta_z,
self._gripper_vel_cmd
]
# Switch control mode if needs be
if (self._ctl_mode != SIArmController.CARTESIAN_VELOCITY):
self._ctl_mode = SIArmController.CARTESIAN_VELOCITY
self.api.set_control_mode(KinovaAPI.CARTESIAN_CONTROL)
# Un-pause the controller
self.pause_controller = False
self.last_cartesian_vel_cmd_update = rospy.get_time()
    def _update_angular_vel_cmd(self, cmds):
        """
        Latch the latest joint-space velocity command from the ROS topic,
        switch the lower layer to angular control if needed, and un-pause
        the controller.
        """
        with self._lock:
            if "6dof" == self.arm_dof:
                self._last_angular_vel_cmd = [
                    cmds.theta_shoulder_pan_joint,
                    cmds.theta_shoulder_lift_joint,
                    cmds.theta_elbow_joint,
                    cmds.theta_wrist_1_joint,
                    cmds.theta_wrist_2_joint,
                    cmds.theta_wrist_3_joint
                ]
            elif "7dof" == self.arm_dof:
                self._last_angular_vel_cmd = [
                    cmds.theta_shoulder_pan_joint,
                    cmds.theta_shoulder_lift_joint,
                    cmds.theta_arm_half_joint,
                    cmds.theta_elbow_joint,
                    cmds.theta_wrist_spherical_1_joint,
                    cmds.theta_wrist_spherical_2_joint,
                    cmds.theta_wrist_3_joint
                ]
            else:
                # Error condition
                rospy.logerr("DoF needs to be set 6 or 7, was {}".format(self.arm_dof))
                self.Stop()
                return

            # Append gripper commands to the cmds list
            # NOTE(review): three finger slots are always appended, even for a
            # 2-finger gripper, while __init__ sizes the list as
            # num_joints + num_fingers — presumably the lower-level API always
            # expects three finger values; confirm against
            # KinovaAPI.send_angular_vel_cmds before changing.
            for i in range(3):
                self._last_angular_vel_cmd.append(self._gripper_vel_cmd)

            # Switch control mode if needs be
            if (self._ctl_mode != SIArmController.ANGULAR_VELOCITY):
                self._ctl_mode = SIArmController.ANGULAR_VELOCITY
                self.api.set_control_mode(KinovaAPI.ANGULAR_CONTROL)

            # Un-pause the controller
            self.pause_controller = False
            self.last_angular_vel_cmd_update = rospy.get_time()
    def SetEstop(self):
        """Engage the software e-stop.

        Re-latches the position-control state first so that clearing the
        e-stop resumes from the current pose rather than a stale setpoint.
        """
        self._init_ext_joint_position_control()
        self.estop = True
    def ClearEstop(self):
        """Release the software e-stop so the control loop resumes output."""
        self.estop = False
def Stop(self):
rospy.loginfo("Stopping the %s arm controller"%self._prefix)
with self._lock:
try:
self._t1.shutdown()
except:
pass
try:
self._jspub.unregister()
self._cartesian_vel_cmd_sub.unregister()
self._jspub.unregister()
except:
pass
self.api.Shutdown()
rospy.loginfo("%s arm controller has stopped"%self._prefix)
self._done = True
    def _is_shutdown(self):
        """Return True once the controller has been stopped.

        Also triggers Stop() when ROS itself is shutting down, so the timer
        callback can exit cleanly.
        """
        if rospy.is_shutdown():
            self.Stop()
        return self._done
    def UpdatePIDGains(self, pid_gains):
        # NOTE(review): this reorders the gains to joint order but never
        # applies them to self._pid, so the call is currently a no-op —
        # presumably unfinished; confirm intended behavior before relying
        # on it.
        new_pid_gains = [pid_gains[jnt] for jnt in self._joint_names]
    def Pause(self):
        """Pause the control loop: zero commands are sent while paused."""
        self.pause_controller = True
    def Resume(self):
        """Resume normal control-loop output after a Pause()."""
        self.pause_controller = False
    def GetCtlStatus(self):
        """Return the lower-level Kinova API's online flag."""
        return self.api.api_online
def SetPositionHold(self):
if self._position_hold:
return
with self._lock:
self._position_hold=True
self._arm_cmds['position'] = self._joint_fb['position']
self._arm_cmds['velocity'] = [0.0]*self._num_joints
self._arm_cmds['acceleration'] = [0.0]*self._num_joints
    def ClearPositionHold(self):
        """Release position hold so CommandJoints() calls are accepted again."""
        with self._lock:
            self._position_hold = False
    def CommandJoints(self, pos, vel=None, acc=None):
        """
        Command the arm with desired joint positions
        Supports soft velocity and acceleration constraints

        pos/vel/acc are dicts keyed by joint name (self._joint_names).
        Returns False without acting while position hold is engaged,
        True otherwise.
        """
        if self._position_hold:
            return False
        with self._lock:
            self._arm_cmds['position'] = [pos[jnt] for jnt in self._joint_names]
            tmp = [i for i in self._arm_cmds['position']]
            # Remap commands for the continuous joints onto the equivalent
            # angle nearest the current feedback, so the arm takes the short
            # way around.  The excluded indices (1,2 for 6dof; 1,3,5 for 7dof)
            # are presumably the mechanically limited joints — confirm
            # against get_smallest_difference_to_cont_angle / the arm specs.
            for jnt in range(self._num_joints):
                if ("6dof" == self.arm_dof):
                    if (jnt != 1) and (jnt != 2):
                        self._arm_cmds['position'][jnt] = get_smallest_difference_to_cont_angle(tmp[jnt], self._joint_fb['position'][jnt])
                if ("7dof" == self.arm_dof):
                    if (jnt != 1) and (jnt != 3) and (jnt != 5):
                        self._arm_cmds['position'][jnt] = get_smallest_difference_to_cont_angle(tmp[jnt], self._joint_fb['position'][jnt])
            # Missing vel/acc constraints default to zero (no feedforward).
            if vel:
                self._arm_cmds['velocity'] = [vel[jnt] for jnt in self._joint_names]
            else:
                self._arm_cmds['velocity'] = [0.0]*self._num_joints
            if acc:
                self._arm_cmds['acceleration'] = [acc[jnt] for jnt in self._joint_names]
            else:
                self._arm_cmds['acceleration'] = [0.0]*self._num_joints
        return True
def CommandGripper(self, finger_pos):
"""
Command the gripper with a desired finger position
"""
with self._lock:
self._gripper_pos_cmds = [finger_pos]*self.num_fingers
def GetGripperFdbk(self):
gripperfdbk = [0]*3
with self._lock:
gripperfdbk[0] = self._gripper_fb['position']
gripperfdbk[1] = self._gripper_fb['velocity']
tmp = self._actfdbk_msg.current[self._num_joints:self._num_joints+self.num_fingers]
gripperfdbk[2] = [(i/0.8) * 25 for i in tmp]
return gripperfdbk
    def StopGripper(self):
        """Halt finger motion by re-targeting the current finger positions."""
        with self._lock:
            self._gripper_pos_cmds = self._gripper_fb['position']
def GetCurrentJointPosition(self, joint_names):
with self._lock:
pos = dict(zip(self._jsmsg.name,self._joint_fb['position']))
pos = [pos[jnt] for jnt in joint_names]
return pos
def GetCurrentJointVelocity(self,joint_names):
with self._lock:
vel = dict(zip(self._jsmsg.name,self._joint_fb['velocity']))
vel = [vel[jnt] for jnt in joint_names]
return vel
def GetCurrentJointPositionError(self,joint_names):
with self._lock:
pos_error = dict(zip(self._jsmsg.name,self._pid_error))
pos_error = [pos_error[jnt] for jnt in joint_names]
return pos_error
    def _update_controller_data(self):
        """
        Pull fresh feedback from the Kinova API and publish all feedback
        topics: actuator current/temperature, arm joint states, gripper
        joint states, cartesian force (wrench) and gravity-free joint
        torques.  Called once from __init__ and on every control cycle.
        """
        pos = self.api.get_angular_position()
        vel = self.api.get_angular_velocity()
        angular_force = self.api.get_angular_force()
        sensor_data = self.api.get_sensor_data()
        cartesian_force = self.api.get_cartesian_force()
        angular_force_gravity_free = self.api.get_angular_force_gravity_free()

        # Empty lists indicate no fresh data from the API; keep the previous
        # values in that case.
        # NOTE(review): sensor_data[0] looks like per-actuator currents and
        # sensor_data[1] temperatures — confirm against KinovaAPI.get_sensor_data.
        if (len(sensor_data[0]) > 0):
            self._actfdbk_msg.current = sensor_data[0]
        if (len(sensor_data[1]) > 0):
            self._actfdbk_msg.temperature = sensor_data[1]
        self._actfdbk_msg.header.stamp = rospy.get_rostime()
        self._actfdbk_msg.header.seq += 1
        self._actfdbk_pub.publish(self._actfdbk_msg)

        if (len(pos) > 0):
            self._joint_fb['position'] = pos[:self._num_joints]
        if (len(vel) > 0):
            self._joint_fb['velocity'] = vel[:self._num_joints]
        if (len(angular_force) > 0):
            self._joint_fb['force'] = angular_force[:self._num_joints]

        # Wrap the revolute (continuous) joints into [-pi, pi) for publishing;
        # the mechanically limited joints are passed through unwrapped.
        tmp = [0.0]*self._num_joints
        if ("6dof" == self.arm_dof):
            tmp[0] = wrap_angle(self._joint_fb['position'][0])
            tmp[1] = self._joint_fb['position'][1]
            tmp[2] = self._joint_fb['position'][2]
            tmp[3] = wrap_angle(self._joint_fb['position'][3])
            tmp[4] = wrap_angle(self._joint_fb['position'][4])
            tmp[5] = wrap_angle(self._joint_fb['position'][5])
        if ("7dof" == self.arm_dof):
            tmp[0] = wrap_angle(self._joint_fb['position'][0])
            tmp[1] = self._joint_fb['position'][1]
            tmp[2] = wrap_angle(self._joint_fb['position'][2])
            tmp[3] = self._joint_fb['position'][3]
            tmp[4] = wrap_angle(self._joint_fb['position'][4])
            tmp[5] = self._joint_fb['position'][5]
            tmp[6] = wrap_angle(self._joint_fb['position'][6])

        self._jsmsg.header.stamp = rospy.get_rostime()
        self._jsmsg.position = tmp
        self._jsmsg.velocity = self._joint_fb['velocity']
        self._jsmsg.effort = self._joint_fb['force']
        self._jspub.publish(self._jsmsg)
        self._jsmsg.header.seq += 1

        # Gripper joint states (the rq85 gripper publishes its own).
        if (0 != self.num_fingers and "rq85" != self.gripper):
            if (len(pos) > 0):
                self._gripper_fb['position'] = pos[self._num_joints:self._num_joints+self.num_fingers]
            if (len(vel) > 0):
                self._gripper_fb['velocity'] = vel[self._num_joints:self._num_joints+self.num_fingers]
            if (len(angular_force) > 0):
                self._gripper_fb['force'] = angular_force[self._num_joints:self._num_joints+self.num_fingers]

            self._gripper_jsmsg.header.stamp = rospy.get_rostime()
            self._gripper_jsmsg.position = self._gripper_fb['position']
            self._gripper_jsmsg.velocity = self._gripper_fb['velocity']
            self._gripper_jsmsg.effort = self._gripper_fb['force']
            self._gripper_jspub.publish(self._gripper_jsmsg)
            self._gripper_jsmsg.header.seq += 1

        # update and publish cartesian force (wrench)
        self._cartesianforce_msg.header.stamp = rospy.get_rostime()
        self._cartesianforce_msg.x = cartesian_force[0]
        self._cartesianforce_msg.y = cartesian_force[1]
        self._cartesianforce_msg.z = cartesian_force[2]
        self._cartesianforce_msg.theta_x = cartesian_force[3]
        self._cartesianforce_msg.theta_y = cartesian_force[4]
        self._cartesianforce_msg.theta_z = cartesian_force[5]
        self._cartesianforce_pub.publish(self._cartesianforce_msg)
        self._cartesianforce_msg.header.seq += 1

        # update and publish angular force gravity free(joint torque)
        self._angularforce_gravityfree_msg.header.stamp = rospy.get_rostime()
        self._angularforce_gravityfree_msg.joint = [round(x, 3) for x in angular_force_gravity_free]
        self._angularforce_gravityfree_pub.publish(self._angularforce_gravityfree_msg)
        self._angularforce_gravityfree_msg.header.seq += 1
    def _run_ctl(self, events):
        """Run one control cycle for the arm (called periodically).

        Refreshes controller feedback, then dispatches on the current control
        mode: TRAJECTORY runs a PID + feed-forward loop that turns desired
        positions into angular velocity commands; ANGULAR_VELOCITY and
        CARTESIAN_VELOCITY pass externally supplied velocities through after
        timeout and rate-limit safety checks. Does nothing when shut down or
        e-stopped, and emits zero commands while paused.

        NOTE(review): `events` is unused in this body — presumably part of a
        timer-callback signature; confirm against the caller.
        NOTE(review): several `map(...)` results are indexed below (e.g.
        self._pid_error[i]), so this code assumes Python 2 where map returns
        a list; under Python 3 it would raise TypeError.
        """
        if self._is_shutdown():
            return
        with self._lock:
            # First update the controller data
            self._update_controller_data()
            # Don't do anything if we're e-stopped
            if self.estop:
                return
            if (True == self.pause_controller):
                # If we're paused, don't run any PID calcs, just output zero
                # commands
                self._init_ext_joint_position_control()
                # XXX ajs 19/Mar/2018 Not sure if the gripper should be
                # initialised here as well? Original code didn't.
                #self._init_ext_gripper_control()
                if (SIArmController.CARTESIAN_VELOCITY == self._ctl_mode):
                    # Send zero cartesian commands
                    # X, Y, Z, ThetaX, ThetaY, ThetaZ, FingerVel
                    self.api.send_cartesian_vel_cmd([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0])
                elif (SIArmController.ANGULAR_VELOCITY == self._ctl_mode) or\
                     (SIArmController.TRAJECTORY == self._ctl_mode):
                    # Send zero angular commands to all joints and the fingers
                    self.api.send_angular_vel_cmds([0.0] * (self._num_joints + self.num_fingers))
                else:
                    rospy.logerr("{} arm controller: Unrecognized control mode {}".format(
                        self._prefix,
                        self._ctl_mode
                    ))
                return
            if (SIArmController.TRAJECTORY == self._ctl_mode):
                # Handle trajectory control - a PID loop tracks the desired
                # positions (in angular or cartesian space), and computes
                # requisite angular controller velocities
                # Compute the error and update the feedforward terms
                arm_cmds_lim = self._arm_rate_limit.Update(self._arm_cmds['position'])
                vff = self._arm_vff_diff.Update(arm_cmds_lim)
                # Feed-forward gains: velocity term weighted 1.0, acceleration
                # term weighted 0.0 (i.e. acceleration FF is currently disabled).
                scaled_ff_vel = map(operator.mul, vff, [1.0] * self._num_joints)
                scaled_ff_acc = map(operator.mul, self._arm_cmds['acceleration'], [0.0] * self._num_joints)
                ff_terms = map(operator.add, scaled_ff_vel, scaled_ff_acc)
                self._pid_error = map(operator.sub, arm_cmds_lim, self._joint_fb['position'])
                self._pid_output = [self._pid[i].compute_output(self._pid_error[i]) for i in range(self._num_joints)]
                self._pid_output = map(operator.add,self._pid_output, ff_terms)
                # Clamp each joint to its per-DOF velocity limit (rad) and
                # convert to degrees, which the Kinova API expects.
                if ("6dof" == self.arm_dof):
                    self._pid_output = [rad_to_deg(limit(self._pid_output[i],JOINT_6DOF_VEL_LIMITS[i])) for i in range(self._num_joints)]
                if ("7dof" == self.arm_dof):
                    self._pid_output = [rad_to_deg(limit(self._pid_output[i],JOINT_7DOF_VEL_LIMITS[i])) for i in range(self._num_joints)]
                # Prepare command array
                cmds = self._pid_output
                # Compute finger rates
                if (0 != self.num_fingers):
                    gripper_cmds_lim = self._gripper_rate_limit.Update(self._gripper_pos_cmds)
                    vff = self._gripper_vff.Update(gripper_cmds_lim)
                    self._gripper_pid_error = map(operator.sub, gripper_cmds_lim, self._gripper_fb['position'])
                    self._gripper_pid_output = [self._gripper_pid[i].compute_output(self._gripper_pid_error[i]) for i in range(self.num_fingers)]
                    self._gripper_pid_output = map(operator.add, self._gripper_pid_output, vff)
                    self._gripper_pid_output = [rad_to_deg(limit(self._gripper_pid_output[i],FINGER_ANGULAR_VEL_LIMIT)) for i in range(self.num_fingers)]
                    # Append gripper commands to the cmds list
                    # The API always takes 3 finger slots; unused ones get 0.0.
                    for i in range(3):
                        if (i < self.num_fingers):
                            cmds.append(self._gripper_pid_output[i])
                        else:
                            cmds.append(0.0)
                # Send the command via the API
                self.api.send_angular_vel_cmds(cmds)
                # Finally, publish the angular position controller state
                self._jstmsg.header.frame_id = ''
                self._jstmsg.header.stamp = rospy.get_rostime()
                self._jstmsg.desired.positions=self._arm_cmds['position']
                self._jstmsg.desired.velocities=self._arm_cmds['velocity']
                self._jstmsg.desired.accelerations=self._arm_cmds['acceleration']
                self._jstmsg.actual.positions=self._joint_fb['position']
                self._jstmsg.actual.velocities=self._joint_fb['velocity']
                self._jstmsg.actual.accelerations=[0.0]*self._num_joints
                self._jstmsg.error.positions = self._pid_error
                self._jstmsg.error.velocities= map(operator.sub, self._arm_cmds['velocity'], self._joint_fb['velocity'])
                self._jstmsg.error.accelerations=[0.0]*self._num_joints
                self._jstpub.publish(self._jstmsg)
                self._jstmsg.header.seq +=1
            elif (SIArmController.ANGULAR_VELOCITY == self._ctl_mode):
                # Handle angular velocity control - angular velocities are
                # directly passed through to the lower level controller
                self._init_ext_joint_position_control()
                self._init_ext_gripper_control()
                # Safety check: If it has been more than 1 second since
                # the last command, drop back to trajectory control and
                # pause the controller
                if ((rospy.get_time() - self.last_angular_vel_cmd_update) >= 1.0):
                    self._ctl_mode = SIArmController.TRAJECTORY
                    self.api.set_control_mode(KinovaAPI.ANGULAR_CONTROL)
                    self.pause_controller = True
                    return
                # Safety check: If it has been more than 0.5 seconds since
                # the last command, zero the velocities
                if ((rospy.get_time() - self.last_angular_vel_cmd_update) >= 0.5):
                    self._last_angular_vel_cmd = [0.0] * (self._num_joints + self.num_fingers)
                # Safety check: Apply rate limits for arm joints
                # NOTE(review): cmd_limited aliases (does not copy)
                # self._last_angular_vel_cmd, so the stored command is
                # clamped in place — confirm this is intended.
                cmd_limited = self._last_angular_vel_cmd
                for joint in range(self._num_joints):
                    if "6dof" == self.arm_dof:
                        cmd_limited[joint] = rad_to_deg(
                            limit(
                                deg_to_rad(cmd_limited[joint]),
                                JOINT_6DOF_VEL_LIMITS[joint]
                            )
                        )
                    elif "7dof" == self.arm_dof:
                        cmd_limited[joint] = rad_to_deg(
                            limit(
                                deg_to_rad(cmd_limited[joint]),
                                JOINT_7DOF_VEL_LIMITS[joint]
                            )
                        )
                    else:
                        # Error condition
                        rospy.logerr("DoF needs to be set 6 or 7, was {}".format(self.arm_dof))
                        self.Stop()
                        return
                # Safety check: Apply rate limits for finger joints
                for finger in range(self.num_fingers):
                    cmd_limited[self._num_joints + finger] = rad_to_deg(
                        limit(
                            deg_to_rad(cmd_limited[self._num_joints + finger]),
                            FINGER_ANGULAR_VEL_LIMIT
                        )
                    )
                # Command angular velocities
                self.api.send_angular_vel_cmds(cmd_limited)
            elif (SIArmController.CARTESIAN_VELOCITY == self._ctl_mode):
                # Handle cartesian velocity control mode - cartesian
                # velocities are passed through to the lower level controller
                # after some basic safety / timeout checks
                self._init_ext_joint_position_control()
                self._init_ext_gripper_control()
                # Safety check: If it has been more than 1 second since
                # the last command, drop back to angular position control
                if ((rospy.get_time() - self.last_cartesian_vel_cmd_update) >= 1.0):
                    self._ctl_mode = SIArmController.TRAJECTORY
                    self.api.set_control_mode(KinovaAPI.ANGULAR_CONTROL)
                    self.pause_controller = True
                    return
                # Safety check: If it has been more than 0.5 seconds since
                # the last command, zero the velocities
                if ((rospy.get_time() - self.last_cartesian_vel_cmd_update) >= 0.5):
                    self._last_cartesian_vel_cmd = [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
                # Command cartesian velocities
                self.api.send_cartesian_vel_cmd(self._last_cartesian_vel_cmd)
            else:
                # Unknown control mode
                rospy.logerr("{} arm controller: Unrecognized control mode {}".format(
                    self._prefix,
                    self._ctl_mode
                ))
                return
| 43.589286 | 153 | 0.594253 |
acf0bfe3f2b7e06a67004e6162f7a1ff5b5faa8b | 2,156 | py | Python | src/doffin.py | LTS-AS/Bodil | a8dcd38ab70bd604b48fa52d9e59e59ea68c2f5a | [
"Apache-2.0"
] | null | null | null | src/doffin.py | LTS-AS/Bodil | a8dcd38ab70bd604b48fa52d9e59e59ea68c2f5a | [
"Apache-2.0"
] | null | null | null | src/doffin.py | LTS-AS/Bodil | a8dcd38ab70bd604b48fa52d9e59e59ea68c2f5a | [
"Apache-2.0"
] | null | null | null | import webbrowser
# CPV procurement-category codes to include in the Doffin search.
cpvs = [
    '30200000',  # computer equipment and supplies ("Datautstyr og -materiell")
    # 34 = transport equipment and auxiliary transport products
    '34970000',  # traffic-monitoring systems
    '34996000',  # control, safety or signalling equipment for roads
    '34997000',  # control, safety or signalling equipment for airports
    '34998000',  # control, safety or signalling equipment for harbours
    '34999000',  # signal generators, antenna signal splitters and galvanising machines
    # 35 = security, fire-fighting, police and defence equipment
    '35125000',  # surveillance systems
    # 42 = industrial machinery for general and special purposes
    '42122220',  # sewage pumps
    '42500000',  # cooling and ventilation equipment
    '42520000',  # ventilation equipment
    '42961000',  # access control systems
    '42961200',  # SCADA or equivalent systems
    '45331000',  # installation of heating, ventilation and air-conditioning
    '45331210',  # ventilation installation work
    '48921000',  # automation systems
    '71315410',  # inspection of ventilation systems
    # 98 = other community, social and personal services
    '98395000',  # locksmith services
    '98363000',  # diving services
]

# Query parameters, listed in the exact order the original URL used them.
params = [
    'PageNumber=1',
    'PageSize=100',           # results per page: 10, 25, 50 or 100
    'OrderingType=2',         # sort by: 0=relevance, 1=publication date, 2=deadline
    'OrderingDirection=1',    # 0=ascending, 1=descending
    'RegionId=6',             # ''=all, 6=Troendelag, 12=Oestfold
    'CountyId=',
    'MunicipalityId=',
    'IsAdvancedSearch=true',  # appears to act as the "include parent categories" flag
    'location=6',             # same value as the region?
    'NoticeType=2',           # notice type: ''=all, 2=contract notice
    'PublicationType=',
    'IncludeExpired=false',
    'Cpvs=' + '+'.join(cpvs),
    'EpsReferenceNr=',
    'DeadlineFromDate=',
    'DeadlineToDate=',
    'PublishedFromDate=',
    'PublishedToDate=',
]

url = 'https://www.doffin.no/Notice?query=&' + '&'.join(params)

# Open URL in a new tab, if a browser window is already open.
webbrowser.open_new_tab(url)
| 43.12 | 90 | 0.716605 |
acf0c0007cfcc6dbb404587728543d5031852c76 | 6,373 | py | Python | sockeye/score.py | tholiao/sockeye | f33b600dc77ae9f295c05015e2af9045f3a74088 | [
"Apache-2.0"
] | 1 | 2020-03-05T08:12:03.000Z | 2020-03-05T08:12:03.000Z | sockeye/score.py | tholiao/sockeye | f33b600dc77ae9f295c05015e2af9045f3a74088 | [
"Apache-2.0"
] | null | null | null | sockeye/score.py | tholiao/sockeye | f33b600dc77ae9f295c05015e2af9045f3a74088 | [
"Apache-2.0"
] | null | null | null | # Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You may not
# use this file except in compliance with the License. A copy of the License
# is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is distributed on
# an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
"""
Scoring CLI.
"""
import argparse
import logging
import os
from contextlib import ExitStack
from typing import Optional, List, Tuple
from . import arguments
from . import constants as C
from . import data_io
from . import inference
from . import model
from . import scoring
from . import utils
from . import vocab
from .log import setup_main_logger
from .output_handler import get_output_handler
from .utils import check_condition
# Temporary logger, the real one (logging to a file probably, will be created in the main function)
logger = logging.getLogger(__name__)
def main():
    """CLI entry point: parse scoring arguments and hand them to score()."""
    parser = arguments.ConfigArgumentParser(description='Score data with an existing model.')
    arguments.add_score_cli_args(parser)
    score(parser.parse_args())
def get_data_iters_and_vocabs(args: argparse.Namespace,
                              model_folder: Optional[str]) -> Tuple['data_io.BaseParallelSampleIter',
                                                                    List[vocab.Vocab], vocab.Vocab, model.ModelConfig]:
    """
    Loads the data iterators and vocabularies.

    :param args: Arguments as returned by argparse.
    :param model_folder: Output folder.
    :return: The scoring data iterator as well as the source and target vocabularies.
    """
    model_config = model.SockeyeModel.load_config(os.path.join(args.model, C.CONFIG_NAME))

    # Use the lengths the model was trained with unless explicitly overridden.
    if args.max_seq_len is not None:
        max_len_source, max_len_target = args.max_seq_len
    else:
        max_len_source = model_config.config_data.max_seq_len_source
        max_len_target = model_config.config_data.max_seq_len_target

    if args.use_cpu:
        batch_num_devices = 1
    else:
        # A negative device id requests that many devices; a non-negative id names one.
        batch_num_devices = sum(-di if di < 0 else 1 for di in args.device_ids)

    # Load the existing vocabs created when starting the training run.
    source_vocabs = vocab.load_source_vocabs(model_folder)
    target_vocab = vocab.load_target_vocab(model_folder)

    source_paths = [str(os.path.abspath(path)) for path in [args.source] + args.source_factors]

    score_iter = data_io.get_scoring_data_iters(
        sources=source_paths,
        target=os.path.abspath(args.target),
        source_vocabs=source_vocabs,
        target_vocab=target_vocab,
        batch_size=args.batch_size,
        batch_num_devices=batch_num_devices,
        max_seq_len_source=max_len_source,
        max_seq_len_target=max_len_target)

    return score_iter, source_vocabs, target_vocab, model_config
def score(args: argparse.Namespace):
    """Score parallel data with an existing model and write per-sentence scores.

    Sets up logging, acquires compute devices, builds the scoring data
    iterator/vocabularies, constructs a ScoringModel and runs the Scorer,
    sending results to the configured output handler.

    :param args: Parsed CLI arguments (see arguments.add_score_cli_args).
    """
    setup_main_logger(file_logging=False,
                      console=not args.quiet,
                      level=args.loglevel)  # pylint: disable=no-member
    utils.log_basic_info(args)

    with ExitStack() as exit_stack:
        # Device handles are released via the exit stack when scoring finishes.
        context = utils.determine_context(device_ids=args.device_ids,
                                          use_cpu=args.use_cpu,
                                          disable_device_locking=args.disable_device_locking,
                                          lock_dir=args.lock_dir,
                                          exit_stack=exit_stack)
        if args.batch_type == C.BATCH_TYPE_SENTENCE:
            check_condition(args.batch_size % len(context) == 0, "When using multiple devices the batch size must be "
                                                                "divisible by the number of devices. Choose a batch "
                                                                "size that is a multiple of %d." % len(context))
        logger.info("Scoring Device(s): %s", ", ".join(str(c) for c in context))

        # This call has a number of different parameters compared to training which reflect our need to get scores
        # one-for-one and in the same order as the input data.
        # To enable code reuse, we stuff the `args` parameter with some values.
        # Bucketing and permuting need to be turned off in order to preserve the ordering of sentences.
        # Finally, 'resume_training' needs to be set to True because it causes the model to be loaded instead of initialized.
        args.no_bucketing = True
        args.bucket_width = 10

        score_iter, source_vocabs, target_vocab, model_config = get_data_iters_and_vocabs(
            args=args,
            model_folder=args.model)

        scoring_model = scoring.ScoringModel(config=model_config,
                                             model_dir=args.model,
                                             context=context,
                                             provide_data=score_iter.provide_data,
                                             provide_label=score_iter.provide_label,
                                             default_bucket_key=score_iter.default_bucket_key,
                                             score_type=args.score_type,
                                             length_penalty=inference.LengthPenalty(alpha=args.length_penalty_alpha,
                                                                                    beta=args.length_penalty_beta),
                                             brevity_penalty=inference.BrevityPenalty(weight=args.brevity_penalty_weight),
                                             softmax_temperature=args.softmax_temperature,
                                             brevity_penalty_type=args.brevity_penalty_type,
                                             constant_length_ratio=args.brevity_penalty_constant_length_ratio)

        scorer = scoring.Scorer(scoring_model, source_vocabs, target_vocab)

        scorer.score(score_iter=score_iter,
                     output_handler=get_output_handler(output_type=args.output_type,
                                                       output_fname=args.output))
if __name__ == "__main__":
main()
| 45.521429 | 125 | 0.627177 |
acf0c109d281370948fbb2d6d202fdc8d57b2a46 | 1,066 | py | Python | kubernetes/test/test_extensions_v1beta1_ingress_backend.py | itholic/python | dffe577a062e17057270ae80fa677ffd83e9d183 | [
"Apache-2.0"
] | null | null | null | kubernetes/test/test_extensions_v1beta1_ingress_backend.py | itholic/python | dffe577a062e17057270ae80fa677ffd83e9d183 | [
"Apache-2.0"
] | null | null | null | kubernetes/test/test_extensions_v1beta1_ingress_backend.py | itholic/python | dffe577a062e17057270ae80fa677ffd83e9d183 | [
"Apache-2.0"
] | null | null | null | # coding: utf-8
"""
Kubernetes
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
OpenAPI spec version: v1.15.7
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import unittest
import kubernetes.client
from kubernetes.client.models.extensions_v1beta1_ingress_backend import ExtensionsV1beta1IngressBackend # noqa: E501
from kubernetes.client.rest import ApiException
class TestExtensionsV1beta1IngressBackend(unittest.TestCase):
    """ExtensionsV1beta1IngressBackend unit test stubs (generated)."""

    def setUp(self):
        """No fixtures are needed for this generated stub."""

    def tearDown(self):
        """Nothing to clean up."""

    def testExtensionsV1beta1IngressBackend(self):
        """Test ExtensionsV1beta1IngressBackend"""
        # FIXME: construct object with mandatory attributes with example values
        # model = kubernetes.client.models.extensions_v1beta1_ingress_backend.ExtensionsV1beta1IngressBackend()  # noqa: E501
if __name__ == '__main__':
unittest.main()
| 26.65 | 125 | 0.751407 |
acf0c209b427d6dab1e669ddc38b2bba82e23799 | 240 | py | Python | 1_WarmUp/Exercise01.py | takeyoshinitta/NLP-100-Exercise | e77fb385fbbf50c8a8bdc47442db1421739ea5b6 | [
"MIT"
] | 3 | 2022-01-04T19:02:22.000Z | 2022-02-21T08:52:18.000Z | 1_WarmUp/Exercise01.py | takeyoshinitta/NLP-100-Exercise | e77fb385fbbf50c8a8bdc47442db1421739ea5b6 | [
"MIT"
] | null | null | null | 1_WarmUp/Exercise01.py | takeyoshinitta/NLP-100-Exercise | e77fb385fbbf50c8a8bdc47442db1421739ea5b6 | [
"MIT"
] | null | null | null | # 01. "schooled"
# Obtain the string that concatenates the 1st, 3rd, 5th, and 7th letters in the string "schooled".
word = 'schooled'
# The 1st/3rd/5th/7th letters (1-based) live at the 0-based EVEN indices
# 0, 2, 4, 6.  The original loop started at index 1, which yielded the
# 2nd/4th/6th/8th letters ("cold") instead of the expected "shoe".
newStr = ''.join(word[i] for i in range(0, len(word), 2))
print(newStr)
| 26.666667 | 98 | 0.683333 |
acf0c24b0bdd6e6b14fca460566202cd655af9f7 | 290 | py | Python | tests/test_layers/convolutional/test_crystal_conv.py | JonaBecher/spektral | ff59e16d959e0ec698428997363be20462625699 | [
"MIT"
] | 2,145 | 2019-01-21T20:49:44.000Z | 2022-03-28T20:27:27.000Z | tests/test_layers/convolutional/test_crystal_conv.py | jasper-park/spektral | ad2d96549c00f68ce992a7d29e2c3fd025fb529b | [
"MIT"
] | 259 | 2019-01-22T05:18:19.000Z | 2022-03-25T10:46:10.000Z | tests/test_layers/convolutional/test_crystal_conv.py | jasper-park/spektral | ad2d96549c00f68ce992a7d29e2c3fd025fb529b | [
"MIT"
] | 322 | 2019-02-11T16:18:27.000Z | 2022-03-24T16:26:59.000Z | from core import MODES, run_layer
from spektral import layers
# Shared configuration consumed by the generic core.run_layer harness.
config = dict(
    layer=layers.CrystalConv,
    modes=[MODES["SINGLE"], MODES["MIXED"]],
    kwargs=dict(channels=7),
    dense=False,
    sparse=True,
    edges=True,
)


def test_layer():
    """Run the generic layer test harness against CrystalConv."""
    run_layer(config)
| 17.058824 | 47 | 0.613793 |
acf0c2ff721ca3df03ed90dda0ae7a99de7bad00 | 355 | py | Python | Project1/tests/sportscar/body/test_body.py | automationmaki/pytest_simple_examples | 4cd5cdde2b0a359a4f14294ea9e7df2de949f309 | [
"MIT"
] | null | null | null | Project1/tests/sportscar/body/test_body.py | automationmaki/pytest_simple_examples | 4cd5cdde2b0a359a4f14294ea9e7df2de949f309 | [
"MIT"
] | null | null | null | Project1/tests/sportscar/body/test_body.py | automationmaki/pytest_simple_examples | 4cd5cdde2b0a359a4f14294ea9e7df2de949f309 | [
"MIT"
] | null | null | null | from selenium import webdriver
from pytest import mark
from selenium import webdriver
@mark.smoke
@mark.body
class BodyTests:
@mark.ui
def test_can_navigate_to_body_page(self, chrome_browser):
chrome_browser.get('https://www.motortrend.com/')
assert True
def test_bumper(self):
assert True
def test_windshelf(self):
assert True | 20.882353 | 61 | 0.701408 |
acf0c3450c27933c3572ea15b0934375b43dfd67 | 364 | py | Python | umibukela/middleware.py | Code4SA/umibukela | 937d49d86c3aabde923775682d6fb39f521d0e4d | [
"MIT"
] | null | null | null | umibukela/middleware.py | Code4SA/umibukela | 937d49d86c3aabde923775682d6fb39f521d0e4d | [
"MIT"
] | 27 | 2015-12-02T13:58:56.000Z | 2017-04-07T13:27:25.000Z | umibukela/middleware.py | Code4SA/umibukela | 937d49d86c3aabde923775682d6fb39f521d0e4d | [
"MIT"
] | null | null | null | from django.http import HttpResponsePermanentRedirect
class RedirectsMiddleware(object):
    """Permanently redirect cbm.code4sa.org requests to cbm.blacksash.org.za."""

    def process_request(self, request):
        """Return a 301 redirect for the legacy host; None lets the request continue."""
        host = request.get_host()
        if host != 'cbm.code4sa.org':
            # Not the legacy domain: normal request processing continues.
            return None
        target = "https://cbm.blacksash.org.za%s" % request.get_full_path()
        return HttpResponsePermanentRedirect(target)
| 36.4 | 108 | 0.71978 |
acf0c361d776435f6ede318c9f09f50681333fce | 415 | py | Python | UsingPython/basic_algorithm/reverse_integer.py | Rick00Kim/Algorithm_coding | c988729462f3cef78e0b02f888e0117fdefaa5d1 | [
"MIT"
] | null | null | null | UsingPython/basic_algorithm/reverse_integer.py | Rick00Kim/Algorithm_coding | c988729462f3cef78e0b02f888e0117fdefaa5d1 | [
"MIT"
] | null | null | null | UsingPython/basic_algorithm/reverse_integer.py | Rick00Kim/Algorithm_coding | c988729462f3cef78e0b02f888e0117fdefaa5d1 | [
"MIT"
] | null | null | null | from .abstract_algorithm import AbstractAlgorithm
class ReverseInteger(AbstractAlgorithm):
    """Reverse the decimal digits of a non-negative integer."""

    def solution(self, n, debug):
        """Return n with its decimal digits reversed.

        Uses pure integer arithmetic via divmod: the original code divided
        with `/` and re-truncated through int(), which round-trips through a
        float and silently corrupts digits once n exceeds 2**53.

        :param n: non-negative integer to reverse.
        :param debug: when truthy, print the result (the flag previously had
            no effect and the result was always printed).
        :return: the reversed integer.
        """
        rev = 0
        while n > 0:
            n, digit = divmod(n, 10)
            rev = rev * 10 + digit
        if debug:
            print(rev)
        return rev

    def execute(self):
        """Exercise solution() against a few known digit reversals."""
        self.process(123, 321)
        self.process(12345, 54321)
        self.process(456, 654)
| 18.863636 | 49 | 0.53494 |
acf0c3900026d9b6c8256fbc298488bafd558a2c | 626 | py | Python | tests/test_validatewithip.py | zerobounce/zerobounce-python-api | bcf6283fe4330bf6346bd47e1002e477e0682212 | [
"MIT"
] | null | null | null | tests/test_validatewithip.py | zerobounce/zerobounce-python-api | bcf6283fe4330bf6346bd47e1002e477e0682212 | [
"MIT"
] | 1 | 2018-05-02T20:31:25.000Z | 2018-05-06T05:39:54.000Z | tests/test_validatewithip.py | zerobounce/zerobounce-python-api | bcf6283fe4330bf6346bd47e1002e477e0682212 | [
"MIT"
] | 2 | 2018-01-30T07:37:08.000Z | 2018-05-05T00:39:01.000Z | import responses
@responses.activate
def test_should_get_status_valid_when_validatewithip_lowerjill(
        zerobounce, zerobounce_response_validatewithip):
    """A valid address should come back with status and name fields populated."""
    url = ('https://api.zerobounce.net/v1/validatewithip'
           '?apikey=123456&ipaddress=99.123.12.122&email=flowerjill@aol.com')
    responses.add(responses.GET, url,
                  json=zerobounce_response_validatewithip, status=200)
    result = zerobounce.validatewithip("flowerjill@aol.com")
    expected = {'status': "Valid", 'firstname': "Jill", 'lastname': "Stein"}
    for key, value in expected.items():
        assert result[key] == value
| 32.947368 | 119 | 0.699681 |
acf0c3a4df199c29227423fd8904b0c3f59b2426 | 2,862 | py | Python | compiler/tests/22_psram_1bank_8mux_func_test.py | ajaymr12/openram | b46fb724b603c79445bd5601db230468684ad233 | [
"BSD-3-Clause"
] | 1 | 2020-11-21T05:37:53.000Z | 2020-11-21T05:37:53.000Z | compiler/tests/22_psram_1bank_8mux_func_test.py | ajaymr12/openram | b46fb724b603c79445bd5601db230468684ad233 | [
"BSD-3-Clause"
] | null | null | null | compiler/tests/22_psram_1bank_8mux_func_test.py | ajaymr12/openram | b46fb724b603c79445bd5601db230468684ad233 | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python3
# See LICENSE for licensing information.
#
# Copyright (c) 2016-2019 Regents of the University of California and The Board
# of Regents for the Oklahoma Agricultural and Mechanical College
# (acting for and on behalf of Oklahoma State University)
# All rights reserved.
#
import unittest
from testutils import *
import sys,os
sys.path.append(os.getenv("OPENRAM_HOME"))
import globals
from globals import OPTS
from sram_factory import factory
import debug
#@unittest.skip("SKIPPING 22_psram_1bank_8mux_func_test")
class psram_1bank_8mux_func_test(openram_test):
    """Spice-level functional test of a 1-bank, 8-way-muxed parameterized SRAM."""

    def runTest(self):
        """Build a pbitcell-based SRAM netlist and run the functional simulation.

        Statement order matters here: OPTS must be configured BEFORE the
        characterizer package is reloaded, because its __init__ picks the
        spice-based implementation from those options.
        """
        config_file = "{}/tests/configs/config".format(os.getenv("OPENRAM_HOME"))
        globals.init_openram(config_file)
        OPTS.analytical_delay = False
        OPTS.netlist_only = True
        OPTS.trim_netlist = False

        # Use the parameterized bitcell family with 1 read-write and 1 write port.
        OPTS.bitcell = "pbitcell"
        OPTS.replica_bitcell="replica_pbitcell"
        OPTS.dummy_bitcell="dummy_pbitcell"
        OPTS.num_rw_ports = 1
        OPTS.num_r_ports = 0
        OPTS.num_w_ports = 1

        # This is a hack to reload the characterizer __init__ with the spice version
        from importlib import reload
        import characterizer
        reload(characterizer)
        from characterizer import functional, delay
        from sram_config import sram_config
        c = sram_config(word_size=4,
                        num_words=256,
                        num_banks=1)

        c.words_per_row=8
        c.recompute_sizes()
        debug.info(1, "Functional test for {}rw,{}r,{}w psram with"
                      "{} bit words, {} words, {} words per row, {} banks".format(OPTS.num_rw_ports,
                                                                                  OPTS.num_r_ports,
                                                                                  OPTS.num_w_ports,
                                                                                  c.word_size,
                                                                                  c.num_words,
                                                                                  c.words_per_row,
                                                                                  c.num_banks))
        s = factory.create(module_type="sram", sram_config=c)

        tempspice = OPTS.openram_temp + "sram.sp"
        s.sp_write(tempspice)

        # Simulate at the first configured PVT corner only.
        corner = (OPTS.process_corners[0], OPTS.supply_voltages[0], OPTS.temperatures[0])
        f = functional(s.s, tempspice, corner)
        (fail, error) = f.run()
        self.assertTrue(fail,error)
        globals.end_openram()
# run the test from the command line
if __name__ == "__main__":
(OPTS, args) = globals.parse_args()
del sys.argv[1:]
header(__file__, OPTS.tech_name)
unittest.main(testRunner=debugTestRunner())
| 39.75 | 97 | 0.554507 |
acf0c3e2f3b6ffd4650e7396c77d39f13342ceda | 741 | py | Python | tests/format/substitutions.py | samkenxstream/buildstream | 2164ac3ad2854eea30f85af6af2bc8a0b8754f3f | [
"Apache-2.0"
] | null | null | null | tests/format/substitutions.py | samkenxstream/buildstream | 2164ac3ad2854eea30f85af6af2bc8a0b8754f3f | [
"Apache-2.0"
] | null | null | null | tests/format/substitutions.py | samkenxstream/buildstream | 2164ac3ad2854eea30f85af6af2bc8a0b8754f3f | [
"Apache-2.0"
] | null | null | null | # Pylint doesn't play well with fixtures and dependency injection from pytest
# pylint: disable=redefined-outer-name
import os
import pytest
from buildstream._testing import cli # pylint: disable=unused-import
DATA_DIR = os.path.join(os.path.dirname(os.path.realpath(__file__)), "project", "default")
# Test that output is formatted correctly, when there are multiple matches of a
# variable that is known to BuildStream.
#
@pytest.mark.datafiles(os.path.join(DATA_DIR))
def test_match_multiple(cli, datafiles):
    """Every %{name} occurrence expands while a literal {name} is left intact."""
    project = str(datafiles)
    show_args = ["show", "--format", "%{name} {name} %{name}", "manual.bst"]
    result = cli.run(project=project, args=show_args)
    result.assert_success()
    assert result.output == "manual.bst {name} manual.bst\n"
| 35.285714 | 104 | 0.738192 |
acf0c6b803734c11db09e09d161798507c18a0a3 | 5,816 | py | Python | api/src/main/resources/application_detailed_summary.py | cgiraldo/platform-deployment-manager | e9cb295218878f8a75de56a8d15782d78d8edb14 | [
"Apache-2.0"
] | null | null | null | api/src/main/resources/application_detailed_summary.py | cgiraldo/platform-deployment-manager | e9cb295218878f8a75de56a8d15782d78d8edb14 | [
"Apache-2.0"
] | null | null | null | api/src/main/resources/application_detailed_summary.py | cgiraldo/platform-deployment-manager | e9cb295218878f8a75de56a8d15782d78d8edb14 | [
"Apache-2.0"
] | 1 | 2020-04-07T12:45:14.000Z | 2020-04-07T12:45:14.000Z | import time
import json
import logging
import sys
from importlib import import_module
from multiprocessing import TimeoutError as ThreadTimeoutError
from summary_aggregator import ComponentSummaryAggregator
from plugins_summary.yarn_connection import YarnConnection
from async_dispatcher import AsyncDispatcher
import application_registrar
import application_summary_registrar
import deployer_utils
# constants
SUMMARY_INTERVAL = 30
STATUS_INTERVAL = 0.1
REST_API_REQ_TIMEOUT = 5
MAX_APP_SUMMARY_TIMEOUT = 60
def milli_time():
    """Return the current wall-clock time in whole milliseconds."""
    seconds = time.time()
    return int(round(seconds * 1000))
class ApplicationDetailedSummary(object):
    """Builds and stores per-application status summaries in HBase.

    NOTE(review): this module is Python 2 code (it calls dict.iteritems()
    below and mutates a dict while iterating .keys(), which only works when
    keys() returns a list) — under Python 3 both would fail.
    """

    def __init__(self, environment, config):
        """Wire up the HBase registrars, YARN connection and async dispatcher.

        :param environment: deployment environment dict; 'hbase_thrift_server'
            is read, and a REST timeout entry is added in place.
        :param config: deployment-manager configuration dict.
        """
        self._environment = environment
        self._environment.update({'rest_api_req_timeout': REST_API_REQ_TIMEOUT})
        self._config = config
        self._application_registrar = application_registrar.HbaseApplicationRegistrar(environment['hbase_thrift_server'])
        self._application_summary_registrar = application_summary_registrar.HBaseAppplicationSummary(environment['hbase_thrift_server'])
        self._yarn_connection = YarnConnection(self._environment)
        self._summary_aggregator = ComponentSummaryAggregator()
        # Cache of component-summary plugin instances, keyed by component type.
        self._component_creators = {}
        self.dispatcher = AsyncDispatcher(num_threads=4)

    def generate(self):
        """
        Update applications detailed summary
        """
        applist = self._application_registrar.list_applications()
        logging.info("List of applications: %s", ', '.join(applist))
        self._application_summary_registrar.sync_with_dm(applist)
        apps_to_be_processed = {}
        # Kick off one async summary task per application.
        for app in applist:
            apps_to_be_processed.update({app: self.generate_summary(app)})
        wait_time = 0
        # waiting block for all the application to get completed
        while len(apps_to_be_processed) != 0:
            for app_name in apps_to_be_processed.keys():
                try:
                    # Poll the async task; get() raises on timeout.
                    apps_to_be_processed[app_name].task.get(STATUS_INTERVAL) #
                    del apps_to_be_processed[app_name]
                except ThreadTimeoutError:
                    wait_time += STATUS_INTERVAL # increasing the wait time by status interval
                    if round(wait_time, 1) % MAX_APP_SUMMARY_TIMEOUT == 0:
                        # logging out list of applications whose wait time exceeds the max app summary timeout, on the interval of same max app summary timeout
                        # i.e. every 60 seconds as per current max app summary timeout
                        logging.error("Timeout exceeded, %s applications waiting for %d seconds", (',').join(apps_to_be_processed.keys()), int(wait_time))

    def generate_summary(self, application):
        """
        Update HBase wih recent application summary

        Runs asynchronously on the dispatcher; returns the dispatcher handle
        whose .task can be polled for completion.
        """
        def _do_generate():
            # Worker executed on a dispatcher thread; errors are logged,
            # never propagated, so one bad application cannot kill the loop.
            try:
                create_data = self._application_registrar.get_create_data(application)
                input_data = {}
                for component_name, component_data in create_data.iteritems():
                    input_data[component_name] = {}
                    input_data[component_name]["component_ref"] = self._load_creator(component_name)
                    input_data[component_name]["component_data"] = component_data
                app_data = self._summary_aggregator.get_application_summary(application, input_data)
                self._application_summary_registrar.post_to_hbase(app_data, application)
                logging.debug("Application: %s, Status: %s", application, app_data[application]['aggregate_status'])
            except Exception as ex:
                logging.error('%s while trying to get status of application "%s"', str(ex), application)
        return self.dispatcher.run_as_asynch(task=_do_generate)

    def _load_creator(self, component_type):
        """Return (and cache) the summary plugin instance for a component type.

        Looks for class <Type>ComponentSummary in plugins_summary.<type>;
        returns None when the plugin module cannot be imported.
        """
        creator = self._component_creators.get(component_type)
        if creator is None:
            # e.g. "oozie" -> "OozieComponentSummary"
            cls = '%s%sComponentSummary' % (component_type[0].upper(), component_type[1:])
            try:
                module = import_module("plugins_summary.%s" % component_type)
                self._component_creators[component_type] = getattr(module, cls)\
                    (self._environment, self._yarn_connection, self._application_summary_registrar)
                creator = self._component_creators[component_type]
            except ImportError as exception:
                logging.error(
                    'Unable to load Creator for component type "%s" [%s]',
                    component_type,
                    exception)
        return creator
def main():
    """Entry point: load dm-config.json and refresh summaries forever.

    Each loop iteration starts roughly every SUMMARY_INTERVAL seconds: if a
    round takes longer than the interval the next one starts immediately,
    otherwise we sleep for the remainder of the interval.
    """
    config = None
    with open('dm-config.json', 'r') as con:
        config = json.load(con)

    logging.basicConfig(format='%(asctime)s %(levelname)s %(message)s',
                        level=logging.getLevelName(config['config']['log_level']),
                        stream=sys.stderr)

    # Populate Hadoop endpoints in the environment before building the summary.
    deployer_utils.fill_hadoop_env(config['environment'], config['config'])

    summary = ApplicationDetailedSummary(config['environment'], config['config'])

    logging.info('Starting... Building actual status for applications')

    while True:
        # making sure every 30 seconds generate summary initiated
        start_time_on_cur_round = milli_time()
        summary.generate()
        finish_time_on_cur_round = (milli_time() - start_time_on_cur_round)/1000.0
        logging.info("Finished generating summary, time taken %s seconds", str(finish_time_on_cur_round))
        if finish_time_on_cur_round >= SUMMARY_INTERVAL:
            continue
        else:
            # sleep only for the remaining part of the current round's interval
            time.sleep(SUMMARY_INTERVAL - finish_time_on_cur_round)
main()
| 41.248227 | 159 | 0.668501 |
acf0c6d1209b24753622e2433bb673b572c581a4 | 15,257 | py | Python | ui.py | NinaWie/PowerPlanner | d5e96d4f41f1dd26b22ced5f25ac3cd2309473dc | [
"MIT"
] | 2 | 2020-07-29T14:24:52.000Z | 2020-09-08T19:18:36.000Z | ui.py | NinaWie/PowerPlanner | d5e96d4f41f1dd26b22ced5f25ac3cd2309473dc | [
"MIT"
] | null | null | null | ui.py | NinaWie/PowerPlanner | d5e96d4f41f1dd26b22ced5f25ac3cd2309473dc | [
"MIT"
] | null | null | null | import numpy as np
import os
import pickle
import json
from types import SimpleNamespace
import kivy
from kivy.app import App
from kivy.uix.image import Image
from kivy.uix.label import Label
from kivy.uix.slider import Slider
from kivy.uix.widget import Widget
from kivy.graphics.texture import Texture
from kivy.uix.button import Button
from kivy.uix.boxlayout import BoxLayout
from kivy.uix.textinput import TextInput
from kivy.core.window import Window
from kivy.lang import Builder
from kivy.uix.scatter import Scatter
from kivy.uix.popup import Popup
from PIL import Image as im
from power_planner.utils.utils import (
get_distance_surface, time_test_csv, compute_pylon_dists
)
from power_planner.graphs.implicit_lg import ImplicitLG
from power_planner.alternative_paths import AlternativePaths
from power_planner.ksp import KSP
class ImageFromArray(Widget):
    """Kivy widget that displays a numpy RGB array, scaled to fit a bounding box."""

    def __init__(self, width, height, **kwargs):
        """Create the widget with a (width, height) display bound and a black image.

        :param width: maximum display width in pixels.
        :param height: maximum display height in pixels.
        """
        super(ImageFromArray, self).__init__(**kwargs)
        self.max_width = width
        self.max_height = height
        # img = np.random.rand(width, height, 3) * 255
        img = np.zeros((width, height, 3))
        self.set_array(img)

    def set_array(self, img_in):
        """Convert an RGB array into a texture and add it as a child image.

        NOTE(review): assumes img_in has shape (W, H, 3) with values in
        0..255 — confirm against callers. Each call adds a new child widget
        without removing the previous one; presumably the newest is drawn on
        top — verify this is intended.
        """
        # img_in = img_in[20:-20, 20:-20, :3]
        self.current_in_img = img_in
        # Swap to (H, W, 3) and flip vertically so array row 0 ends up at the
        # bottom, matching Kivy's texture coordinate origin.
        img_in = np.flip(np.swapaxes(img_in, 1, 0), axis=0).astype(np.uint8)
        h, w, _ = img_in.shape
        # compute how much we have to resize it to make it fit bounds
        ratio_resize = max([w / self.max_width, h / self.max_height])
        # convert to PIL Image - Note: axes are swapped!
        img_in = im.fromarray(img_in)
        new_img_size = (int(w / ratio_resize), int(h / ratio_resize))
        # resize
        img = np.array(img_in.resize(new_img_size, resample=im.BILINEAR))
        self.current_img = img
        # make texture
        texture = Texture.create(size=new_img_size)
        texture.blit_buffer(img.flatten(), colorfmt='rgb', bufferfmt='ubyte')
        self.take_size = new_img_size
        w_img = PressableImage(size=new_img_size, texture=texture)
        self.add_widget(w_img)
class PressableImage(Image):
    """Image that pops up the pixel value under the cursor when clicked."""
    def on_touch_down(self, touch):
        """Show a small popup with the rounded value of the parent's resized
        image at the touched position.

        NOTE(review): indexes current_img with raw window coordinates, so it
        assumes the image sits at the window origin — confirm.
        """
        if self.collide_point(*touch.pos):
            print("pressed", touch.pos)
            # touch_str = str(round(touch.pos[0])
            #                 ) + "_" + str(round(touch.pos[1]))
            touch_str = str(
                np.around(
                    self.parent.current_img[int(touch.pos[0]),
                                            int(touch.pos[1])], 1
                )
            )
            popupWindow = Popup(
                title="Resistance",
                content=Label(text=touch_str),
                size_hint=(None, None),
                size=(150, 100)
            )
            popupWindow.open()
        # return super(PressableImage, self).on_touch_down(touch)
class ResizableDraggablePicture(Scatter):
    """Scatter variant that interprets mouse-wheel events as zooming
    instead of ordinary touches."""
    def on_touch_down(self, touch):
        """Zoom in/out on scroll-wheel events (scale kept within (1, 10));
        delegate every other touch to Scatter's default handling."""
        if not touch.is_mouse_scrolling:
            # Non-scroll touch: fall back on Scatter.  The original code
            # discards Scatter's return value here, so keep doing that.
            super(ResizableDraggablePicture, self).on_touch_down(touch)
            return
        if touch.button == 'scrolldown' and self.scale < 10:
            self.scale = self.scale * 1.1
        elif touch.button == 'scrollup' and self.scale > 1:
            self.scale = self.scale * 0.8
class DemoApp(App):
    """Kivy UI for interactive power-line path planning.

    Loads a pickled problem instance (cost layers, corridor, config),
    shows it as an image, and offers buttons for a single shortest path,
    shortest-path trees, k-shortest paths and window-constrained
    path replacement.
    """
    def build(self):
        """Construct and return the root widget tree."""
        # Defaults
        self.SCALE_PARAM = 5
        self.sliders_initialized = 0
        right_bar_size = .3
        slider_bar_size = .2
        superBox = BoxLayout(orientation='vertical')
        # Data read field
        data_box = BoxLayout(
            orientation='horizontal', size_hint=(1, None), height=30
        )
        self.filepath = TextInput(
            hint_text="data/test_data_1_2.dat",
            size_hint=(1 - right_bar_size, 1)
        )
        self.filepath.text = "data/test_data_1_2.dat" # set default
        self.load_data = Button(
            text='Load data',
            on_press=self.loadData,
            size_hint=(right_bar_size + 0.05, 1)
        )
        data_box.add_widget(self.filepath)
        data_box.add_widget(self.load_data)
        # configuration json
        config_box = BoxLayout(
            orientation='horizontal', size_hint=(1, None), height=30
        )
        self.json_fp = TextInput(
            hint_text="data/ch_config.json", size_hint=(1 - right_bar_size, 1)
        )
        self.json_fp.text = "data/ch_config.json" # set manually here
        self.load_json_but = Button(
            text='Load configuration',
            on_press=self.load_json,
            size_hint=(right_bar_size, 1)
        )
        self.load_json_but.disabled = True
        # config_box.add_widget(self.json_fp)
        # config_box.add_widget(self.load_json_but)
        # Declare initial status
        self.data_loaded = False
        self.json_loaded = False
        # Sliders
        self.slider_box = BoxLayout(
            orientation='vertical', width=Window.width * slider_bar_size
        )
        # additional buttons
        button_box = BoxLayout(
            orientation='vertical', width=Window.width * right_bar_size
        )
        # Define right side buttons
        self.single_button = Button(
            text="Single shortest path",
            on_press=self.single_sp,
            size=(Window.width * right_bar_size, 30)
        )
        self.sp_tree_button = Button(
            text="Shortest path trees",
            on_press=self.sp_tree,
            size=(Window.width * right_bar_size, 30)
        )
        # self.sp_button = Button(
        #     text="Shortest path",
        #     on_press=self.shortest_path,
        #     size=(Window.width * right_bar_size, 30)
        # )
        self.ksp_button = Button(
            text="KSP",
            on_press=self.ksp,
            size=(Window.width * right_bar_size, 30)
        )
        self.alternative_button = Button(
            text="Informed path routing",
            on_press=self.rect_popup,
            size=(Window.width * right_bar_size, 30)
        )
        # Add to widget — all disabled until data has been loaded.
        for button in [
            self.single_button, self.sp_tree_button, self.ksp_button,
            self.alternative_button
        ]:
            button.disabled = True
            button_box.add_widget(button)
        # make horizontal box with canvas and buttons
        canv_box = BoxLayout(orientation='horizontal')
        self.img_widget = ImageFromArray(600, 500)
        # for scroll function, add the comment - but not working well
        self.scatter_widget = Scatter() # ResizableDraggablePicture()
        self.scatter_widget.add_widget(self.img_widget)
        canv_box.add_widget(self.scatter_widget)
        canv_box.add_widget(self.slider_box)
        canv_box.add_widget(button_box)
        # add to final box
        superBox.add_widget(data_box)
        superBox.add_widget(config_box)
        superBox.add_widget(canv_box)
        return superBox
    def _mark_start_dest(self, buffer=2):
        """Paint yellow squares on the display image at start/dest cells."""
        (x, y) = tuple(self.config.graph.start_inds)
        self.disp_inst[x - buffer:x + buffer + 1, y - buffer:y + buffer +
                       1] = [255, 255, 0]
        (x, y) = tuple(self.config.graph.dest_inds)
        self.disp_inst[x - buffer:x + buffer + 1, y - buffer:y + buffer +
                       1] = [255, 255, 0]
    def loadData(self, instance):
        """Unpickle the instance file, show it and enable the path buttons.

        Silently does nothing if the path in the text field does not exist.
        """
        # self.filepath.text = str(os.path.exists(self.filepath.text))
        if os.path.exists(self.filepath.text):
            with open(self.filepath.text, "rb") as infile:
                data = pickle.load(infile)
                (
                    self.instance, self.edge_inst, self.instance_corr,
                    self.config
                ) = data
            print(self.instance.shape)
            # disp instance is with RGB and overlayed with corridor
            self.disp_inst = (
                np.moveaxis(self.instance, 0, -1)[:, :, :3] * 255
            ) * np.expand_dims(self.instance_corr, 2)
            self._mark_start_dest()
            print(self.disp_inst.shape)
            self.img_widget.set_array(self.disp_inst)
            self.graph = ImplicitLG(
                self.instance,
                self.instance_corr,
                edge_instance=self.edge_inst,
            )
            self.cfg = self.config.graph
            # NOTE(review): derives the scale from the last character of the
            # filename stem (e.g. "..._2.dat" -> 2) — confirm naming scheme.
            self.SCALE_PARAM = int(self.filepath.text.split(".")[0][-1])
            # enable button
            self.load_json_but.disabled = False
            self.sp_tree_button.disabled = False
            self.single_button.disabled = False
            # init sliders
            self.init_slider_box(instance)
    def init_slider_box(self, instance):
        """Create the weight sliders once, then sync them with the config."""
        if not self.sliders_initialized:
            # Sliders for angle and edges
            angle_label = Label(text="Angle weight")
            self.angle_slider = Slider(min=0, max=1)
            edge_label = Label(text="Edge weight")
            self.edge_slider = Slider(min=0, max=1)
            self.slider_box.add_widget(edge_label)
            self.slider_box.add_widget(self.edge_slider)
            self.slider_box.add_widget(angle_label)
            self.slider_box.add_widget(self.angle_slider)
            # make one slider for each category
            self.weight_sliders = []
            for name in self.cfg.layer_classes:
                label = Label(text=name)
                slider = Slider(min=0, max=1)
                self.weight_sliders.append(slider)
                self.slider_box.add_widget(label)
                self.slider_box.add_widget(slider)
            # set it to initialized
            self.sliders_initialized = 1
        # UPDATE VALUES ACCORDING TO CONFIG
        self.angle_slider.value = self.cfg.angle_weight
        self.edge_slider.value = self.cfg.edge_weight
        normed_weights = np.asarray(self.cfg.class_weights
                                    ) / np.sum(self.cfg.class_weights)
        for i in range(len(normed_weights)):
            self.weight_sliders[i].value = float(normed_weights[i])
    def load_json(self, instance):
        """Placeholder: JSON config loading is currently disabled."""
        # with open(self.json_fp.text, "r") as infile:
        #     self.cfg_dict = json.load(infile)
        # self.cfg = SimpleNamespace(**self.cfg_dict)
        # (self.cfg.PYLON_DIST_MIN,
        #  self.cfg.PYLON_DIST_MAX) = compute_pylon_dists(
        #      self.cfg.PYLON_DIST_MIN, self.cfg.PYLON_DIST_MAX,
        #      self.cfg.RASTER, self.SCALE_PARAM
        #  )
        # self.init_slider_box(instance)
        pass
    def single_sp(self, instance, buffer=1):
        """Compute one shortest path with the current slider weights and
        draw it in white on the display image."""
        new_class_weights = [slider.value for slider in self.weight_sliders]
        self.cfg.class_weights = new_class_weights
        self.cfg.angle_weight = self.angle_slider.value
        self.cfg.edge_weight = self.edge_slider.value
        # new_img = (np.random.rand(1000, 400, 3) * 150)
        # self.img_widget.set_array(new_img)
        path, _, _ = self.graph.single_sp(**vars(self.cfg))
        plotted_inst = self.path_plotter(
            self.disp_inst.copy(), path, [255, 255, 255], buffer=buffer
        )
        self.img_widget.set_array(plotted_inst)
        print("Done single shortest path")
    def sp_tree(self, instance, buffer=1):
        """Compute shortest-path trees, draw the path, and enable the
        KSP / informed-routing buttons that depend on the trees."""
        new_class_weights = [slider.value for slider in self.weight_sliders]
        self.cfg.class_weights = new_class_weights
        self.cfg.angle_weight = self.angle_slider.value
        self.cfg.edge_weight = self.edge_slider.value
        # set edge cost (must be repeated because of angle weight)
        path, _, _ = self.graph.sp_trees(**vars(self.cfg))
        # plot the path
        plotted_inst = self.path_plotter(
            self.disp_inst.copy(), path, [255, 255, 255], buffer=buffer
        )
        self.img_widget.set_array(plotted_inst)
        # enable KSP
        self.ksp_button.disabled = False
        self.alternative_button.disabled = False
        print("Done shortest path trees")
    def ksp(self, instance, buffer=1):
        """Compute 5 alternative paths (Laplace KSP) and draw them in
        shades of grey, best path brightest and on top."""
        ksp = KSP(self.graph)
        ksp_output = ksp.laplace(5, thresh=20)
        paths = [k[0] for k in ksp_output]
        plotted_inst = self.disp_inst.copy()
        # Draw in reverse order so the best (index 0) path is drawn last.
        for i in range(len(paths) - 1, -1, -1):
            path = paths[i]
            val = 255 - i * 30
            plotted_inst = self.path_plotter(
                plotted_inst, path, [val, val, val], buffer=buffer
            )
        self.img_widget.set_array(plotted_inst)
        print("ksp done")
    def paint_rectangle(self, instance, buffer=2):
        """Read the four popup text fields, draw the red rectangle outline
        and remember its coordinates for the replacement-path step."""
        try:
            ul_x = int(self.rect_text[0].text)
            ul_y = int(self.rect_text[1].text)
            br_x = int(self.rect_text[2].text)
            br_y = int(self.rect_text[3].text)
        except ValueError:
            print("error: not all values given")
            return
        copied_inst = self.disp_inst.copy()
        copied_inst[ul_x:br_x, ul_y - buffer:ul_y + buffer + 1] = [255, 0, 0]
        copied_inst[ul_x:br_x, br_y - buffer:br_y + buffer + 1] = [255, 0, 0]
        copied_inst[ul_x - buffer:ul_x + buffer + 1, ul_y:br_y] = [255, 0, 0]
        copied_inst[br_x - buffer:br_x + buffer + 1, ul_y:br_y] = [255, 0, 0]
        self.rect = (ul_x, br_x, ul_y, br_y)
        self.img_widget.set_array(copied_inst)
        self.but_dismiss.disabled = False
    def rect_popup(self, instance):
        """Open the popup asking for the window rectangle coordinates."""
        box = BoxLayout(orientation='vertical', height=200)
        text_labels = [
            "Upper left X", "Upper left Y", "Lower right X", "lower right Y"
        ]
        text = text_labels # [150, 250, 200, 300]
        self.rect_text = []
        for (l, t) in zip(text_labels, text):
            t_in = TextInput(hint_text=l)
            t_in.text = str(t)
            self.rect_text.append(t_in)
            box.add_widget(t_in)
        # add buttons
        but = Button(text='Paint rectangle', on_press=self.paint_rectangle)
        self.but_dismiss = Button(text='Finish', on_press=self.alternative)
        self.but_dismiss.disabled = True
        box.add_widget(but)
        box.add_widget(self.but_dismiss)
        # define popup
        self.popupWindow = Popup(
            title="Set rectangle",
            content=box,
            size_hint=(None, None),
            size=(300, 300)
        )
        self.popupWindow.open()
    def alternative(self, instance, buffer=2):
        """Compute the replacement path routed through the chosen rectangle
        and draw it in red on top of the current image."""
        self.popupWindow.dismiss()
        print("rect", self.rect)
        alt = AlternativePaths(self.graph)
        replacement_path, _, _ = alt.path_through_window(*self.rect)
        plot_inst = self.img_widget.current_in_img.copy()
        plotted_inst = self.path_plotter(
            plot_inst, replacement_path, [255, 0, 0], buffer=buffer
        )
        self.img_widget.set_array(plotted_inst)
        print("replacement done")
    def path_plotter(self, plotted_inst, path, col, buffer=1):
        """Paint colour *col* in a (2*buffer+1)-wide square around every
        (x, y) cell of *path*; returns the modified array."""
        for (x, y) in path:
            plotted_inst[x - buffer:x + buffer + 1, y - buffer:y + buffer +
                         1] = col
        return plotted_inst
if __name__ == '__main__':
DemoApp().run()
| 37.578818 | 78 | 0.594022 |
acf0c773ce6ed7e90dbee1bd63e6e7d6740e32f0 | 4,617 | py | Python | instagramapp/models.py | Leina33/Gram | c03cd1b946ad4afce94b03e804000078b86dfc71 | [
"MIT"
] | null | null | null | instagramapp/models.py | Leina33/Gram | c03cd1b946ad4afce94b03e804000078b86dfc71 | [
"MIT"
] | 3 | 2021-06-02T00:53:46.000Z | 2021-06-10T22:29:52.000Z | instagramapp/models.py | Leina33/Gram | c03cd1b946ad4afce94b03e804000078b86dfc71 | [
"MIT"
] | null | null | null | from django.db import models
from django.contrib.auth.models import User
from pyuploadcare.dj.models import ImageField
# Create your models here.
import datetime as dt
class Profile(models.Model):
    """Per-user profile data (bio, avatar) plus follower relations."""
    class Meta:
        db_table = 'profile'
    bio = models.TextField(max_length=200, null=True, blank=True, default="bio")
    profilepic = models.ImageField(upload_to='picture/', null=True, blank=True)
    user = models.OneToOneField(User, on_delete=models.CASCADE, blank=True, related_name="profile")
    followers = models.ManyToManyField(User, related_name="followers", blank=True)
    following = models.ManyToManyField(User, related_name="following", blank=True)
    def save_profile(self):
        """Persist this profile."""
        self.save()
    def delete_profile(self):
        """Delete this profile from the database."""
        self.delete()
    def follow_user(self, follower):
        """Add *follower* to the set of users this profile follows."""
        return self.following.add(follower)
    def unfollow_user(self, to_unfollow):
        """Remove *to_unfollow* from the set this profile follows."""
        return self.following.remove(to_unfollow)
    def is_following(self, checkuser):
        """Return True if *checkuser* is among the users being followed."""
        return checkuser in self.following.all()
    def get_number_of_followers(self):
        # Fix: the original ran count() twice (one query to test, one to
        # return); count() already returns 0 for an empty relation.
        return self.followers.count()
    def get_number_of_following(self):
        # Single COUNT query (see get_number_of_followers).
        return self.following.count()
    @classmethod
    def search_users(cls, search_term):
        """Case-insensitive username substring search."""
        profiles = cls.objects.filter(user__username__icontains=search_term)
        return profiles
    def __str__(self):
        return self.user.username
class Location(models.Model):
    """A named place that images can be associated with."""
    name = models.CharField(max_length=30)
    def save_location(self):
        """Persist this location."""
        self.save()
    def delete_location(self):
        """Delete this location from the database."""
        self.delete()
    def __str__(self):
        return self.name
class tags(models.Model):
    """A hashtag-style label attachable to images.

    NOTE(review): lowercase class name is unconventional for Django models
    but is referenced as-is by Image.tags — do not rename casually.
    """
    name = models.CharField(max_length=30)
    def __str__(self):
        return self.name
    def save_tags(self):
        """Persist this tag."""
        self.save()
    def delete_tags(self):
        """Delete this tag from the database."""
        self.delete()
class Image(models.Model):
    """An uploaded picture with its metadata, tags and like counter."""
    image=models.ImageField(upload_to='picture/', )
    name = models.CharField(max_length=40)
    user = models.ForeignKey(User, on_delete=models.CASCADE, blank=True, related_name="images")
    description=models.TextField()
    # NOTE(review): no on_delete — this only works on Django < 2.0, where
    # it defaulted to CASCADE; confirm before upgrading.
    location=models.ForeignKey(Location, null=True)
    tags=models.ManyToManyField(tags, blank=True)
    # Denormalised like counter (see also the Like model).
    likes = models.IntegerField(default=0)
    comments= models.TextField(blank=True)
    def __str__(self):
        return self.name
    def save_image(self):
        """Persist this image record."""
        self.save()
    @classmethod
    def delete_image_by_id(cls, id):
        """Delete the image with primary key *id* (no-op if absent)."""
        pictures = cls.objects.filter(pk=id)
        pictures.delete()
    @classmethod
    def get_image_by_id(cls, id):
        """Return the image with primary key *id*; raises DoesNotExist."""
        pictures = cls.objects.get(pk=id)
        return pictures
    @classmethod
    def filter_by_tag(cls, tags):
        """Return a queryset of images carrying the given tag."""
        pictures = cls.objects.filter(tags=tags)
        return pictures
    @classmethod
    def filter_by_location(cls, location):
        """Return a queryset of images taken at *location*."""
        pictures = cls.objects.filter(location=location)
        return pictures
    @classmethod
    def search_image(cls, search_term):
        """Case-insensitive name substring search."""
        pictures = cls.objects.filter(name__icontains=search_term)
        return pictures
    @classmethod
    def update_image(cls, id):
        # NOTE(review): update(id=id) rewrites the pk to itself — this is
        # effectively a no-op; presumably a stub for a real update — confirm.
        pictures=cls.objects.filter(id=id).update(id=id)
        return pictures
    @classmethod
    def update_description(cls, id):
        # NOTE(review): same apparent no-op as update_image — confirm intent.
        pictures = cls.objects.filter(id=id).update(id=id)
        return pictures
class Followers(models.Model):
    """Follower relationship stored as plain usernames.

    NOTE(review): Profile already tracks followers/following via
    ManyToMany fields; this looks like a legacy duplicate — confirm.
    """
    # Username of the account being followed.
    user = models.CharField(max_length=20, default="")
    # Username of the follower.
    follower = models.CharField(max_length=20, default="")
class Review(models.Model):
    """A comment left by a user on an image."""
    user = models.ForeignKey(User, null=True, blank=True, on_delete=models.CASCADE, related_name='user')
    image = models.ForeignKey(Image, on_delete=models.CASCADE, related_name="review")
    comment = models.TextField()
    def save_comment(self):
        """Persist this comment."""
        self.save()
    def get_comment(self, id):
        """Return all comments for image *id* (ignores self)."""
        comments = Review.objects.filter(image_id =id)
        return comments
    def __str__(self):
        return self.comment
class NewsLetterRecipients(models.Model):
    """Name/email pair of a newsletter subscriber."""
    name = models.CharField(max_length = 30)
    email = models.EmailField()
class Like(models.Model):
    """A like given by a user to an image (one per user/image/value)."""
    # NOTE(review): ForeignKey without on_delete — Django < 2.0 style.
    user = models.ForeignKey(User)
    image = models.ForeignKey(Image)
    # NOTE(review): IntegerField with default=True (stored as 1) — presumably
    # meant as a boolean/valued like; confirm intended semantics.
    value = models.IntegerField(default=True, null=True, blank=True)
    def save_like(self):
        """Persist this like."""
        self.save()
    def __str__(self):
        return str(self.user) + ':' + str(self.image) + ':' + str(self.value)
    class Meta:
        unique_together = ("user", "image", "value")
acf0c95ddca652ee1d6b0d20c549762c807497e5 | 1,848 | py | Python | menu.py | umarbrowser/pyMemo | 1046cb74b9f100da76c79996a6567a4940a23666 | [
"Apache-2.0"
] | null | null | null | menu.py | umarbrowser/pyMemo | 1046cb74b9f100da76c79996a6567a4940a23666 | [
"Apache-2.0"
] | null | null | null | menu.py | umarbrowser/pyMemo | 1046cb74b9f100da76c79996a6567a4940a23666 | [
"Apache-2.0"
] | null | null | null | #imports notebook and sys that is operating
#system module
import sys
from notebook import Notebook, Note
#menu class for displaying interface
class Menu:
    """Text-mode menu that drives a Notebook via numbered choices."""
    def __init__(self):
        self.notebook = Notebook()
        # Maps the user's menu input to the handler method.
        self.choices = {
            '1': self.show_notes,
            '2': self.search_notes,
            '3': self.add_note,
            '4': self.modify_note,
            '5': self.quit
        }
    def display_menu(self):
        """Print the list of available commands."""
        print('''
            Notebook Menu

            1. Show all Notes
            2. Search Notes
            3. Add Note
            4. Modify Note
            5. Quit
            ''')
    def run(self):
        """Show the menu and dispatch user choices until quit is chosen."""
        while True:
            self.display_menu()
            choice = input('Enter an option: ')
            action = self.choices.get(choice)
            if action:
                action()
            else:
                print('{0} is not a valid choice'.format(choice))
    def show_notes(self, notes=None):
        """Print *notes*, or every note in the notebook when none are given.

        Bug fix: the original parameter was named ``note`` but the method
        iterated over ``notes``, so passing search results (as
        search_notes does) raised NameError.
        """
        if not notes:
            notes = self.notebook.notes
        for note in notes:
            print('{0}: {1}\n{2}'.format(
                note.id, note.tags, note.memo))
    def search_notes(self):
        """Prompt for a filter string and show the matching notes."""
        filter = input('Seach for: ')
        notes = self.notebook.search(filter)
        self.show_notes(notes)
    def add_note(self):
        """Prompt for a memo and store it as a new note."""
        memo = input('Enter a memo: ')
        self.notebook.new_note(memo)
        print('Your note has been added.')
    def modify_note(self):
        """Prompt for a note id and update its memo and/or tags."""
        id = input('Enter a note id: ')
        memo = input('Enter a memo: ')
        tags = input('Enter a tags: ')
        if memo:
            self.notebook.modify_memo(id, memo)
        if tags:
            # Bug fix: the original called modify_memo with the tags, which
            # clobbered the memo and never changed the tags.  Assumes
            # Notebook exposes modify_tags mirroring modify_memo — confirm.
            self.notebook.modify_tags(id, tags)
    def quit(self):
        """Print a goodbye message and terminate the process."""
        print('Thank you for using your notebook today.')
        sys.exit(0)
if __name__ == '__main__':
Menu().run()
| 24.972973 | 66 | 0.524351 |
acf0c9a99ddc2f53a4f06592bc907086be9823fa | 2,508 | py | Python | src/apscheduler/eventbrokers/local.py | sasirajpuvvada/apscheduler | 8b68b6c5d1c63faae1ba3769b6475b396328e3a3 | [
"MIT"
] | 4,294 | 2015-12-25T19:52:20.000Z | 2022-03-31T19:40:12.000Z | src/apscheduler/eventbrokers/local.py | sasirajpuvvada/apscheduler | 8b68b6c5d1c63faae1ba3769b6475b396328e3a3 | [
"MIT"
] | 505 | 2015-12-03T13:57:22.000Z | 2022-03-31T00:32:56.000Z | src/apscheduler/eventbrokers/local.py | sasirajpuvvada/apscheduler | 8b68b6c5d1c63faae1ba3769b6475b396328e3a3 | [
"MIT"
] | 692 | 2015-12-24T22:54:56.000Z | 2022-03-29T09:32:02.000Z | from __future__ import annotations
from asyncio import iscoroutinefunction
from concurrent.futures import ThreadPoolExecutor
from contextlib import ExitStack
from threading import Lock
from typing import Any, Callable, Iterable, Optional
import attr
from ..abc import Subscription
from ..events import Event
from ..util import reentrant
from .base import BaseEventBroker
@reentrant
@attr.define(eq=False)
class LocalEventBroker(BaseEventBroker):
    """Synchronous in-process event broker.

    Events are delivered to subscriber callbacks on a single background
    thread (a one-worker ThreadPoolExecutor), so callbacks never run
    concurrently with each other.
    """
    # Single-worker pool that serialises delivery; created in __enter__.
    _executor: ThreadPoolExecutor = attr.field(init=False)
    _exit_stack: ExitStack = attr.field(init=False)
    # Guards the subscription table shared between publish and (un)subscribe.
    _subscriptions_lock: Lock = attr.field(init=False, factory=Lock)
    def __enter__(self):
        self._exit_stack = ExitStack()
        self._executor = self._exit_stack.enter_context(ThreadPoolExecutor(1))
        return self
    def __exit__(self, exc_type, exc_val, exc_tb):
        # Shuts down the executor (the exit stack closes it for us).
        self._exit_stack.__exit__(exc_type, exc_val, exc_tb)
        del self._executor
    def subscribe(self, callback: Callable[[Event], Any],
                  event_types: Optional[Iterable[type[Event]]] = None, *,
                  one_shot: bool = False) -> Subscription:
        """Register *callback* for the given event types (all when None).

        Raises ValueError for coroutine functions: this broker is
        synchronous and cannot await them.
        """
        if iscoroutinefunction(callback):
            raise ValueError('Coroutine functions are not supported as callbacks on a synchronous '
                             'event source')
        with self._subscriptions_lock:
            return super().subscribe(callback, event_types, one_shot=one_shot)
    def unsubscribe(self, token: object) -> None:
        """Thread-safe removal of the subscription identified by *token*."""
        with self._subscriptions_lock:
            super().unsubscribe(token)
    def publish(self, event: Event) -> None:
        self.publish_local(event)
    def publish_local(self, event: Event) -> None:
        """Queue *event* for delivery to every matching subscriber."""
        event_type = type(event)
        with self._subscriptions_lock:
            one_shot_tokens: list[object] = []
            for token, subscription in self._subscriptions.items():
                if subscription.event_types is None or event_type in subscription.event_types:
                    self._executor.submit(self._deliver_event, subscription.callback, event)
                    if subscription.one_shot:
                        one_shot_tokens.append(subscription.token)
            # Remove one-shot subscriptions only after the loop so the dict
            # is not mutated while being iterated.
            for token in one_shot_tokens:
                super().unsubscribe(token)
    def _deliver_event(self, func: Callable[[Event], Any], event: Event) -> None:
        # Runs on the executor thread; exceptions are logged, never raised.
        try:
            func(event)
        except BaseException:
            self._logger.exception('Error delivering %s event', event.__class__.__name__)
acf0cb740707d225e0ee3f276bc0f7e5c11f8e75 | 228 | py | Python | lang/Python/runtime-evaluation-in-an-environment-2.py | ethansaxenian/RosettaDecode | 8ea1a42a5f792280b50193ad47545d14ee371fb7 | [
"MIT"
] | null | null | null | lang/Python/runtime-evaluation-in-an-environment-2.py | ethansaxenian/RosettaDecode | 8ea1a42a5f792280b50193ad47545d14ee371fb7 | [
"MIT"
] | null | null | null | lang/Python/runtime-evaluation-in-an-environment-2.py | ethansaxenian/RosettaDecode | 8ea1a42a5f792280b50193ad47545d14ee371fb7 | [
"MIT"
] | null | null | null | def eval_with_args(code, **kwordargs):
return eval(code, kwordargs)
code = '2 ** x'
eval_with_args(code, x=5) - eval_with_args(code, x=3)
24
code = '3 * x + y'
eval_with_args(code, x=5, y=2) - eval_with_args(code, x=3, y=1)
7
| 22.8 | 63 | 0.671053 |
acf0cb8ac5457a19728b0f02ead8b008f6305953 | 621 | py | Python | Python-desenvolvimento/ex086.py | MarcosMaciel-MMRS/Desenvolvimento-python | 2b2fc54788da3ca110d495b9e80a494f2b31fb09 | [
"MIT"
] | null | null | null | Python-desenvolvimento/ex086.py | MarcosMaciel-MMRS/Desenvolvimento-python | 2b2fc54788da3ca110d495b9e80a494f2b31fb09 | [
"MIT"
] | null | null | null | Python-desenvolvimento/ex086.py | MarcosMaciel-MMRS/Desenvolvimento-python | 2b2fc54788da3ca110d495b9e80a494f2b31fb09 | [
"MIT"
] | null | null | null | #Crie um programa que declare uma matriz de dimensão 3x3 e preencha com valores lidos pelo teclado.
#No final, mostre a matriz na tela, com a formatação correta.
from random import randint
# Fill a 3x3 matrix with random values in 0..10 and print it aligned.
# Bug fix: the original drew randint(0, 10) once before the loops, so every
# cell received the same "random" value; draw a fresh number per cell.
matriz = [[0, 0, 0], [0, 0, 0], [0, 0, 0]]
for l in range(0, 3):  # rows
    for c in range(0, 3):  # columns
        # matriz[l][c] = int(input(f'Digite um valor: '))  # keyboard variant
        matriz[l][c] = randint(0, 10)
print('-='*30)
for l in range(0, 3):
    for c in range(0, 3):
        print(f'{matriz[l][c]:^6}', end = '')
    print()
acf0cb9f876c05439acb6bc82217cbb0c178a9c9 | 209 | py | Python | run.py | sohail-surge/911bot | 2fcf2d915d2e73b025c62e24a31561ce124a9cd3 | [
"BSD-3-Clause"
] | null | null | null | run.py | sohail-surge/911bot | 2fcf2d915d2e73b025c62e24a31561ce124a9cd3 | [
"BSD-3-Clause"
] | null | null | null | run.py | sohail-surge/911bot | 2fcf2d915d2e73b025c62e24a31561ce124a9cd3 | [
"BSD-3-Clause"
] | null | null | null | from slackbot.bot import Bot
import logging
import os
def main():
    """Configure INFO-level logging and run the slack bot event loop."""
    logging.basicConfig(level=logging.INFO)
    logging.info("Running")
    Bot().run()


if __name__ == "__main__":
    main()
| 14.928571 | 43 | 0.660287 |
acf0cba13c94937023062731a2b5c5b4ab175734 | 1,554 | py | Python | python_src/redisTS.py | jphaugla/grafana-redistimeseries | 89b14aac7fc17fa65e070aa5962f61ab2a590adb | [
"BSD-3-Clause"
] | null | null | null | python_src/redisTS.py | jphaugla/grafana-redistimeseries | 89b14aac7fc17fa65e070aa5962f61ab2a590adb | [
"BSD-3-Clause"
] | null | null | null | python_src/redisTS.py | jphaugla/grafana-redistimeseries | 89b14aac7fc17fa65e070aa5962f61ab2a590adb | [
"BSD-3-Clause"
] | null | null | null | import json
import csv
import redis
from datetime import datetime
from loadFiles import load_csv_to_db_with_labels, load_csv_to_db, connect
con = connect()
load_directory = "./data/"

# (csv file, series key, range-group label) loaded once for each account.
_labelled_series = [
    ("DJI.csv", "DJI", "RG1"),
    ("BTC-USD.csv", "BTC", "RG1"),
    ("GSPC.csv", "GSPC", "RG1"),
    ("IXIC.csv", "IXIC", "RG2"),
    ("N225.csv", "N225", "RG2"),
    ("TNX.csv", "TNX", "RG2"),
]
for account in ("ACCT1", "ACCT2"):
    for csv_name, series_key, range_group in _labelled_series:
        load_csv_to_db_with_labels(
            con, load_directory + csv_name, series_key, range_group, account
        )

# Ticker-only series (the "APPL" key spelling matches the original data).
for csv_name, series_key, label_spec in [
    ("AAPL.csv", "APPL", "LABELS ticker AAPL"),
    ("GOOG.csv", "GOOG", "LABELS ticker GOOG"),
    ("IBM.csv", "IBM", "LABELS ticker IBM"),
    ("TSLA.csv", "TSLA", "LABELS ticker TSLA"),
]:
    load_csv_to_db(con, load_directory + csv_name, series_key, label_spec)

print("successful completion")
acf0cdd8323bf8899c6db9e33357da39250873f9 | 13,183 | py | Python | tests.py | tammer123/discord_bot | 64c4eb9f468b219c06388be6582d0e6e96921f3f | [
"MIT"
] | null | null | null | tests.py | tammer123/discord_bot | 64c4eb9f468b219c06388be6582d0e6e96921f3f | [
"MIT"
] | null | null | null | tests.py | tammer123/discord_bot | 64c4eb9f468b219c06388be6582d0e6e96921f3f | [
"MIT"
] | null | null | null | import unittest
from constants import *
from wow import *
from util import *
class BaseTest(unittest.TestCase):
def test_for_normal_query_split(self):
# Tests to ensure that the query gets split properly when the bot gets a message.
# Example query: '!armory pve/pvp <name> <realm> <region>'
sample_query = '!armory pve jimo burning-legion us'
self.assertEqual(split_query(sample_query, 'pve'), ['jimo', 'burning-legion', 'pve', 'us'])
def test_for_url_query_split(self):
# Tests to ensure that the query string gets split properly when the bot gets a url based message.
# Example query: '!armory pve/pvp <armory-link> <region>' (Accepts either a world of warcraft or battle net link)
sample_wow_url = '!armory pve https://worldofwarcraft.com/en-us/character/burning-legion/jimo us'
sample_battlenet_url = '!armory pve http://us.battle.net/wow/en/character/burning-legion/jimo/advanced us'
self.assertEqual(split_query(sample_wow_url, 'pve'), ['jimo', 'burning-legion', 'pve', 'us'])
self.assertEqual(split_query(sample_battlenet_url, 'pvp'), ['jimo', 'burning-legion', 'pvp', 'us'])
def test_for_warrior_class(self):
# Makes sure that when the id for the Warrior class is passed we get the
# correct name and colour.
self.assertEqual(class_details(CLASS_WARRIOR),
{'colour': 0xC79C6E, 'name': 'Warrior'})
def test_for_paladin_class(self):
# Makes sure that when the id for the Paladin class is passed we get the
# correct name and colour.
self.assertEqual(class_details(CLASS_PALADIN),
{'colour': 0xF58CBA, 'name': 'Paladin'})
def test_for_hunter_class(self):
# Makes sure that when the id for the Hunter class is passed we get the
# correct name and colour.
self.assertEqual(class_details(CLASS_HUNTER),
{'colour': 0xABD473, 'name': 'Hunter'})
def test_for_rogue_class(self):
# Makes sure that when the id for the Rogue class is passed we get the
# correct name and colour.
self.assertEqual(class_details(CLASS_ROGUE),
{'colour': 0xFFF569, 'name': 'Rogue'})
def test_for_priest_class(self):
# Makes sure that when the id for the Priest class is passed we get the
# correct name and colour.
self.assertEqual(class_details(CLASS_PRIEST),
{'colour': 0xFFFFFF, 'name': 'Priest'})
def test_for_death_knight_class(self):
# Makes sure that when the id for the Death Knight class is passed we get the
# correct name and colour.
self.assertEqual(class_details(CLASS_DEATH_KNIGHT),
{'colour': 0xC41F3B, 'name': 'Death Knight'})
def test_for_shaman_class(self):
# Makes sure that when the id for the Shaman class is passed we get the
# correct name and colour.
self.assertEqual(class_details(CLASS_SHAMAN),
{'colour': 0x0070DE, 'name': 'Shaman'})
def test_for_mage_class(self):
# Makes sure that when the id for the Mage class is passed we get the
# correct name and colour.
self.assertEqual(class_details(CLASS_MAGE),
{'colour': 0x69CCF0, 'name': 'Mage'})
def test_for_warlock_class(self):
# Makes sure that when the id for the Warlock class is passed we get the
# correct name and colour.
self.assertEqual(class_details(CLASS_WARLOCK),
{'colour': 0x9482C9, 'name': 'Warlock'})
def test_for_monk_class(self):
# Makes sure that when the id for the Monk class is passed we get the
# correct name and colour.
self.assertEqual(class_details(CLASS_MONK),
{'colour': 0x00FF96, 'name': 'Monk'})
def test_for_druid_class(self):
# Makes sure that when the id for the Druid class is passed we get the
# correct name and colour.
self.assertEqual(class_details(CLASS_DRUID),
{'colour': 0xFF7D0A, 'name': 'Druid'})
def test_for_demon_hunter_class(self):
# Makes sure that when the id for the Demon Hunter class is passed we get the
# correct name and colour.
self.assertEqual(class_details(CLASS_DEMON_HUNTER),
{'colour': 0xA330C9, 'name': 'Demon Hunter'})
def test_for_faction_name(self):
# Makes sure that when the id for either the Horde or Alliance faction is
# passsed we get the correct name in return.
self.assertEqual(faction_details(FACTION_ALLIANCE), 'Alliance')
self.assertEqual(faction_details(FACTION_HORDE), 'Horde')
def test_for_achievement_progress(self):
# Passes in some mock API data and expects it to return as completed.
# Tests for accuracy on each id check, not API data.
self.maxDiff = None
input_data_horde_sample = {
"achievements": {
"achievementsCompleted": [11611, 11162, 11185, 11184, 2090, 2093,
2092, 2091, 11194, 11581, 11195, 11874, 5356, 5353, 5349, 11191, 11192, 11874, 12110, 12111]
}
}
input_data_alliance_sample = {
"achievements": {
"achievementsCompleted": [11611, 11162, 11185, 11184, 2090, 2093,
2092, 2091, 11194, 11581, 11195, 11874, 5343, 5339, 5334, 11192, 11874, 11875, 12110]
}
}
expected_horde_data = {
'challenging_look': 'Completed',
'keystone_master': 'Completed',
'keystone_conqueror': 'Completed',
'keystone_challenger': 'Completed',
'arena_challenger': 'Completed',
'arena_rival': 'Completed',
'arena_duelist': 'Completed',
'arena_gladiator': 'Completed',
'rbg_2400_name': AC_HIGH_WARLORD_NAME,
'rbg_2000_name': AC_CHAMPION_NAME,
'rbg_1500_name': AC_FIRST_SERGEANT_NAME,
'rbg_2400': 'Completed',
'rbg_2000': 'Completed',
'rbg_1500': 'Completed',
'en_feat': 'Cutting Edge',
'tov_feat': 'Ahead of the Curve',
'nh_feat': 'Cutting Edge',
'tos_feat': 'Ahead of the Curve',
'atbt_feat': 'Cutting Edge'
}
expected_alliance_data = {
'challenging_look': 'Completed',
'keystone_master': 'Completed',
'keystone_conqueror': 'Completed',
'keystone_challenger': 'Completed',
'arena_challenger': 'Completed',
'arena_rival': 'Completed',
'arena_duelist': 'Completed',
'arena_gladiator': 'Completed',
'rbg_2400_name': AC_GRAND_MARSHALL_NAME,
'rbg_2000_name': AC_LIEAUTENANT_COMMANDER_NAME,
'rbg_1500_name': AC_SERGEANT_MAJOR_NAME,
'rbg_2400': 'Completed',
'rbg_2000': 'Completed',
'rbg_1500': 'Completed',
'en_feat': 'Ahead of the Curve',
'tov_feat': 'Ahead of the Curve',
'nh_feat': 'Cutting Edge',
'tos_feat': 'Cutting Edge',
'atbt_feat': 'Ahead of the Curve'
}
self.assertEqual(character_achievements(input_data_horde_sample, 'Horde'), expected_horde_data)
self.assertEqual(character_achievements(input_data_alliance_sample, 'Alliance'), expected_alliance_data)
def test_pvp_progression(self):
# Passes in some mock API data and expects it to return an object with the correct data.
# Tests for accuracy on each data check, not API data.
self.maxDiff = None
sample_data = {
"pvp": {
"brackets": {
"ARENA_BRACKET_2v2": {
"rating": 5928,
},
"ARENA_BRACKET_3v3": {
"rating": 1858,
},
"ARENA_BRACKET_RBG": {
"rating": 5999,
},
"ARENA_BRACKET_2v2_SKIRMISH": {
"rating": 2985,
}
}
},
"totalHonorableKills": 888399
}
expected_data = {
'2v2': 5928,
'2v2s': 2985,
'3v3': 1858,
'rbg': 5999,
'kills': 888399
}
self.assertEqual(character_arena_progress(sample_data), expected_data)
    def test_pve_progression(self):
        # Passes in some mock API data and expects it to return an object with the correct data.
        # Tests for accuracy on each data check, not API data.
        # Each raid entry carries per-boss kill counts per difficulty; the
        # expected output counts bosses killed at each difficulty plus the
        # total boss count per raid (ids mapped to raid names below).
        self.maxDiff = None
        sample_data = {
            "progression": {
                "raids": [
                    {
                        # Emerald Nightmare (2 bosses in this fixture)
                        "id": 8026,
                        "bosses": [{
                            "lfrKills": 19,
                            "normalKills": 8,
                            "heroicKills": 5,
                            "mythicKills": 3,
                        },
                        {
                            "lfrKills": 3,
                            "normalKills": 7,
                            "heroicKills": 3,
                            "mythicKills": 2,
                        }]
                    },
                    {
                        # Trial of Valor (1 boss, no mythic kill)
                        "id": 8440,
                        "bosses": [{
                            "lfrKills": 7,
                            "normalKills": 1,
                            "heroicKills": 1,
                            "mythicKills": 0,
                        }]
                    },
                    {
                        # Tomb of Sargeras (1 boss, killed on every difficulty)
                        "id": 8524,
                        "bosses": [{
                            "lfrKills": 3,
                            "normalKills": 2,
                            "heroicKills": 4,
                            "mythicKills": 1,
                        }]
                    },
                    {
                        # The Nighthold (2 bosses, no mythic kills)
                        "id": 8025,
                        "bosses": [{
                            "lfrKills": 3,
                            "normalKills": 2,
                            "heroicKills": 1,
                            "mythicKills": 0,
                        },
                        {
                            "lfrKills": 5,
                            "normalKills": 2,
                            "heroicKills": 2,
                            "mythicKills": 0,
                        }]
                    },
                    {
                        # Antorus, the Burning Throne (3 bosses, never on LFR/mythic)
                        "id": 8638,
                        "bosses": [{
                            "lfrKills": 0,
                            "normalKills": 2,
                            "heroicKills": 1,
                            "mythicKills": 0,
                        },
                        {
                            "lfrKills": 0,
                            "normalKills": 2,
                            "heroicKills": 2,
                            "mythicKills": 0,
                        },
                        {
                            "lfrKills": 0,
                            "normalKills": 2,
                            "heroicKills": 2,
                            "mythicKills": 0,
                        }]
                    }]
            }
        }
        expected_data = {
            'emerald_nightmare':{
                'lfr':2,
                'normal':2,
                'heroic':2,
                'mythic':2,
                'bosses':2
            },
            'trial_of_valor':{
                'lfr':1,
                'normal':1,
                'heroic':1,
                'mythic':0,
                'bosses':1
            },
            'the_nighthold':{
                'lfr':2,
                'normal':2,
                'heroic':2,
                'mythic':0,
                'bosses':2
            },
            'tomb_of_sargeras': {
                'lfr':1,
                'normal':1,
                'heroic':1,
                'mythic':1,
                'bosses':1
            },
            'antorus_the_burning_throne': {
                'lfr':0,
                'normal':3,
                'heroic':3,
                'mythic':0,
                'bosses':3
            }
        }
        self.assertEqual(character_progression(sample_data), expected_data)
def test_player_talents(self):
# Passes in some mock API data and expects it to return an object with the correct data.
# Tests for accuracy on each data check, not API data.
sample_data = {
'talents': [
{
'selected':True,
'spec':{
'name':'Holy',
'role':'HEALING'
}
},
{
'spec':{
'name':'Shadow',
'role': 'DAMAGE'
}
},
{
'spec':{
'name':'Discipline',
'role':'HEALING'
}
}
]}
expected_data = {
'active_spec': 'Holy'
}
self.assertEqual(character_talents(sample_data), expected_data)
# Allow the suite to be executed directly with ``python <this file>``.
if __name__ == '__main__':
    unittest.main()
| 35.533693 | 121 | 0.485322 |
acf0cee26276f85d82cab75a99306521a6352275 | 1,518 | py | Python | graphlan/pyphlan/conv_fna2rxl.py | seedpcseed/DiTaxa | 127177deb630ad66520a2fdae1793417cd77ee99 | [
"Apache-2.0"
] | 10 | 2018-06-03T07:13:58.000Z | 2022-02-19T07:11:00.000Z | graphlan/pyphlan/conv_fna2rxl.py | seedpcseed/DiTaxa | 127177deb630ad66520a2fdae1793417cd77ee99 | [
"Apache-2.0"
] | 2 | 2020-02-18T16:46:05.000Z | 2020-10-08T23:03:02.000Z | graphlan/pyphlan/conv_fna2rxl.py | seedpcseed/DiTaxa | 127177deb630ad66520a2fdae1793417cd77ee99 | [
"Apache-2.0"
] | 6 | 2018-06-06T09:03:16.000Z | 2020-09-04T15:34:21.000Z | #!/usr/bin/env python
import sys
import collections
import utils
try:
    import argparse as ap
    import bz2
    import random
    from Bio import SeqIO
except ImportError:
    # NOTE(review): any of the four imports above lands here, but the message
    # only mentions argparse — a missing Biopython (Bio) is the likelier cause.
    sys.stderr.write( "argparse not found" )
    sys.exit(-1)
def read_params( args ):
    """Build the command-line parser and return the parsed options as a dict.

    The *args* parameter is unused; argparse reads sys.argv directly.
    """
    parser = ap.ArgumentParser(description='Convert fasta files files in tab-delimited'
                               ' files with the seed as first field followed by the other IDs\n')
    parser.add_argument('fna', nargs='?', default=None, type=str,
                        help="the input uc file [stdin if not present]")
    parser.add_argument('rxl', nargs='?', default=None, type=str,
                        help="the output txt file compresse if fiven with bz2 extension\n"
                             "[stdout if not present]")
    # Options below were present but disabled in the original implementation:
    """
    p.add_argument('--subsample', metavar="Subsampling rate",
        default=1.0, type=float )
    p.add_argument('-n', metavar="Minimum number of matching taxa",
        default=0, type=int )
    p.add_argument('-p', metavar="Prefix for taxon names",
        default="", type=str )
    """
    return vars(parser.parse_args())
if __name__ == "__main__":
    args = read_params( sys.argv )
    # Load the whole FASTA input into an id -> SeqRecord mapping.
    fna = SeqIO.to_dict(SeqIO.parse( utils.openr(args['fna']), "fasta"))
    with utils.openw(args['rxl']) as out:
        # Alignment length taken from the first record; assumes every sequence
        # shares that length (phylip-style header) — TODO confirm.
        # NOTE(review): dict.values()[0] is Python 2 only; Python 3 would need
        # next(iter(fna.values())).
        n = len(fna.values()[0])
        out.write( str(len(fna))+" "+str(n)+"\n" )
        for k,v in fna.items():
            # Truncate names to 14 characters, then pad to a fixed-width column.
            if len(k) > 14:
                k = k[:14]
            out.write( str(k)+" "*(15-len(str(k)[1:]))+str(v.seq) +"\n" )
| 31.625 | 82 | 0.585639 |
acf0cf25babe9c4d58c50803c5f9b66dd5c5b2fe | 1,581 | py | Python | tests/util/temporary_working_directory_test.py | mostafaelhoushi/CompilerGym | cf11c58333d263b3ebc5ece2110a429e9af499c1 | [
"MIT"
] | 562 | 2020-12-21T14:10:20.000Z | 2022-03-31T21:23:55.000Z | tests/util/temporary_working_directory_test.py | mostafaelhoushi/CompilerGym | cf11c58333d263b3ebc5ece2110a429e9af499c1 | [
"MIT"
] | 433 | 2020-12-22T03:40:41.000Z | 2022-03-31T18:16:17.000Z | tests/util/temporary_working_directory_test.py | mostafaelhoushi/CompilerGym | cf11c58333d263b3ebc5ece2110a429e9af499c1 | [
"MIT"
] | 88 | 2020-12-22T08:22:00.000Z | 2022-03-20T19:00:40.000Z | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Unit tests for //compiler_gym/util:temporary_working_directory."""
import os
import tempfile
from pathlib import Path
from compiler_gym.util.temporary_working_directory import temporary_working_directory
from tests.test_main import main
def test_temporary_working_directory_tempdir():
    """Entering the context with no argument creates a fresh, empty temporary
    directory, chdirs into it, and removes it on exit."""
    with temporary_working_directory() as workdir:
        # endswith() rather than equality: on macOS temporary directories can
        # be reported under a /private prefix.
        assert os.getcwd().endswith(str(workdir))
        assert workdir.is_dir()
        assert list(workdir.iterdir()) == []
        marker = workdir / "test"
        marker.touch()
        assert marker.is_file()
    # Once the context is left, the directory is removed.
    assert not workdir.is_dir()
def test_temporary_working_directory():
    """Entering the context with an explicit path chdirs into it and leaves
    the directory in place afterwards."""
    with tempfile.TemporaryDirectory() as tmp:
        target = Path(tmp)
        with temporary_working_directory(target) as workdir:
            assert workdir == target
            # endswith() rather than equality: on macOS temporary directories
            # can be reported under a /private prefix.
            assert os.getcwd().endswith(str(target))
            assert workdir.is_dir()
            assert list(workdir.iterdir()) == []
            marker = workdir / "test"
            marker.touch()
            assert marker.is_file()
        # Unlike the no-argument form, an explicit directory is preserved.
        assert target.is_dir()
# Run this test module directly (``main`` comes from tests.test_main).
if __name__ == "__main__":
    main()
| 33.638298 | 85 | 0.674257 |
acf0d01b8869966ca367bb73953ded471476747a | 672 | py | Python | Retos/Nivel Avanzado/Reto 13.py | AlexPC23/Python | 77689d74c5444faa1aa253a122602307e52ac581 | [
"Apache-2.0"
] | null | null | null | Retos/Nivel Avanzado/Reto 13.py | AlexPC23/Python | 77689d74c5444faa1aa253a122602307e52ac581 | [
"Apache-2.0"
] | null | null | null | Retos/Nivel Avanzado/Reto 13.py | AlexPC23/Python | 77689d74c5444faa1aa253a122602307e52ac581 | [
"Apache-2.0"
] | null | null | null | #Escribe un programa que sea capaz de encontrar la diferencia completa entre dos fechas, mostrando días, horas, minutos y segundos.
from datetime import datetime
def date_diff_in_seconds(dt2, dt1):
    """Return the whole-second difference ``dt2 - dt1`` (microseconds ignored)."""
    delta = dt2 - dt1
    return delta.days * 86400 + delta.seconds
def dhms_from_seconds(seconds):
    """Split a second count into a ``(days, hours, minutes, seconds)`` tuple."""
    remainder = seconds
    parts = []
    # Peel off seconds-in-minute, minutes-in-hour, hours-in-day in turn.
    for unit in (60, 60, 24):
        remainder, value = divmod(remainder, unit)
        parts.append(value)
    secs, minutes, hours = parts
    return (remainder, hours, minutes, secs)
# Fixed reference instant and "now"; the script prints the elapsed time
# between the two using the helpers above.
date1 = datetime.strptime('2015-01-01 01:00:00', '%Y-%m-%d %H:%M:%S')
date2 = datetime.now()
print("\n%d días, %d horas, %d minutos, %d segundos" % dhms_from_seconds(date_diff_in_seconds(date2, date1))) | 37.333333 | 131 | 0.729167 |
acf0d050f1bfea93638870974dc501bb973ba1a2 | 74,654 | py | Python | toontown/battle/BattleCalculatorAI.py | cmarshall108/Project-Altis | 7ead614abdb5072ca06323982de461f4e775d1b3 | [
"Apache-2.0"
] | 1 | 2021-02-25T06:02:04.000Z | 2021-02-25T06:02:04.000Z | toontown/battle/BattleCalculatorAI.py | AnythingTechPro/Project-Altis | 7ead614abdb5072ca06323982de461f4e775d1b3 | [
"Apache-2.0"
] | null | null | null | toontown/battle/BattleCalculatorAI.py | AnythingTechPro/Project-Altis | 7ead614abdb5072ca06323982de461f4e775d1b3 | [
"Apache-2.0"
] | 2 | 2021-02-25T06:02:05.000Z | 2021-06-19T03:11:22.000Z | import random, sys
from toontown.battle.BattleBase import *
from toontown.battle.BattleGlobals import *
from toontown.battle.DistributedBattleAI import *
from toontown.toonbase.ToontownBattleGlobals import *
from toontown.suit import DistributedSuitBaseAI
from toontown.battle import SuitBattleGlobals
from toontown.battle import BattleExperienceAI
from toontown.toon import NPCToons
from toontown.pets import PetTricks, DistributedPetProxyAI
from toontown.hood import ZoneUtil
from toontown.toonbase.ToonPythonUtil import lerp
class BattleCalculatorAI:
AccuracyBonuses = [0, 20, 40, 60]
DamageBonuses = [0, 20, 20, 20]
AttackExpPerTrack = [0, 10, 20, 30, 40, 50, 60]
TRAP_CONFLICT = -2
APPLY_HEALTH_ADJUSTMENTS = 1
TOONS_TAKE_NO_DAMAGE = 0
CAP_HEALS = 1
CLEAR_SUIT_ATTACKERS = 1
SUITS_UNLURED_IMMEDIATELY = 1
CLEAR_MULTIPLE_TRAPS = 0
KBBONUS_LURED_FLAG = 0
KBBONUS_TGT_LURED = 1
notify = DirectNotifyGlobal.directNotify.newCategory('BattleCalculatorAI')
toonsAlwaysHit = simbase.config.GetBool('toons-always-hit', False)
toonsAlwaysMiss = simbase.config.GetBool('toons-always-miss', False)
toonsAlways5050 = simbase.config.GetBool('toons-always-5050', False)
suitsAlwaysHit = simbase.config.GetBool('suits-always-hit', False)
suitsAlwaysMiss = simbase.config.GetBool('suits-always-miss', False)
immortalSuits = simbase.config.GetBool('immortal-suits', False)
propAndOrganicBonusStack = simbase.config.GetBool('prop-and-organic-bonus-stack', False)
    def __init__(self, battle, tutorialFlag = 0):
        """Set up per-battle bookkeeping for *battle*.

        tutorialFlag makes toon attacks always hit (see __calcToonAtkHit).
        """
        self.battle = battle
        self.SuitAttackers = {}
        # suitId -> lure bookkeeping; suit ids currently soaked by squirt.
        self.currentlyLuredSuits = {}
        self.currentlyWetSuits = []
        self.successfulLures = {}
        self.toonAtkOrder = []
        self.toonHPAdjusts = {}
        # toonId -> per-track experience earned this battle.
        self.toonSkillPtsGained = {}
        # suitId -> [trapLevel, creatorToonId, damage].
        self.traps = {}
        self.npcTraps = {}
        self.suitAtkStats = {}
        self.__clearBonuses(hp=1)
        self.__clearBonuses(hp=0)
        self.delayedUnlures = []
        # Holiday events can scale experience payouts.
        self.__skillCreditMultiplier = simbase.air.holidayManager.getXpMultiplier()
        self.tutorialFlag = tutorialFlag
        self.trainTrapTriggered = False
        self.fireDifficulty = 0
    def setSkillCreditMultiplier(self, mult):
        # Override the experience multiplier applied in __addAttackExp.
        self.__skillCreditMultiplier = mult
    def getSkillCreditMultiplier(self):
        # Current experience multiplier (seeded from the holiday manager).
        return self.__skillCreditMultiplier
    def cleanup(self):
        # Break the reference cycle with the owning battle object.
        self.battle = None
    def __calcToonAtkHit(self, attackIndex, atkTargets):
        """Decide whether the toon attack at *attackIndex* hits *atkTargets*.

        Returns a ``(didHit, accuracy)`` pair and records a miss in the
        attack's TOON_ACCBONUS_COL entry (1 = missed).  Several tracks have
        special cases: traps and NPC/fire attacks always hit, drops always
        miss fully-lured targets, and pet tricks use their own roll.
        """
        if len(atkTargets) == 0:
            return (0, 0)
        # Tutorial battles and the debug config switches short-circuit the roll.
        if self.tutorialFlag:
            return (1, 95)
        if self.toonsAlways5050:
            roll = random.randint(0, 99)
            if roll < 50:
                return (1, 95)
            else:
                return (0, 0)
        if self.toonsAlwaysHit:
            return (1, 95)
        elif self.toonsAlwaysMiss:
            return (0, 0)
        debug = self.notify.getDebug()
        attack = self.battle.toonAttacks[attackIndex]
        atkTrack, atkLevel = self.__getActualTrackLevel(attack)
        # A gag-accuracy buff only applies in certain outdoor zones.
        hasAccuracyBuff = False
        toon = simbase.air.doId2do.get(attack[TOON_ID_COL])
        if toon:
            if toon.hasBuff(BGagAccuracy):
                if not ZoneUtil.isDynamicZone(toon.zoneId):
                    if ZoneUtil.getWhereName(toon.zoneId, True) in ('street', 'factoryExterior', 'cogHQExterior'):
                        hasAccuracyBuff = True
        if atkTrack == NPCSOS:
            return (1, 95)
        if atkTrack == FIRE:
            return (1, 95)
        if atkTrack == TRAP:
            if debug:
                self.notify.debug('Attack is a trap, so it hits regardless')
            attack[TOON_ACCBONUS_COL] = 0
            return (1, 100)
        elif atkTrack == DROP and attack[TOON_TRACK_COL] == NPCSOS:
            # NPC drop: misses only when every target is lured.
            unluredSuits = 0
            for tgt in atkTargets:
                if not self.__suitIsLured(tgt.getDoId()):
                    unluredSuits = 1
            if unluredSuits == 0:
                attack[TOON_ACCBONUS_COL] = 1
                return (0, 0)
        elif atkTrack == DROP:
            # Regular drop: an all-lured target group cannot be hit.
            allLured = True
            for i in xrange(len(atkTargets)):
                if self.__suitIsLured(atkTargets[i].getDoId()):
                    pass
                else:
                    allLured = False
            if allLured:
                attack[TOON_ACCBONUS_COL] = 1
                return (0, 0)
        elif atkTrack == PETSOS:
            return self.__calculatePetTrickSuccess(attack)
        # Use the weakest target defense in the group; count lured targets.
        tgtDef = 0
        numLured = 0
        if atkTrack != HEAL:
            for currTarget in atkTargets:
                thisSuitDef = self.__targetDefense(currTarget, atkTrack)
                if debug:
                    self.notify.debug('Examining suit def for toon attack: ' + str(thisSuitDef))
                tgtDef = min(thisSuitDef, tgtDef)
                if self.__suitIsLured(currTarget.getDoId()):
                    numLured += 1
        # Track experience bonus: best among all toons using the same track
        # on the same target this round.
        trackExp = self.__toonTrackExp(attack[TOON_ID_COL], atkTrack)
        for currOtherAtk in self.toonAtkOrder:
            if currOtherAtk != attack[TOON_ID_COL]:
                nextAttack = self.battle.toonAttacks[currOtherAtk]
                nextAtkTrack = self.__getActualTrack(nextAttack)
                if atkTrack == nextAtkTrack and attack[TOON_TGT_COL] == nextAttack[TOON_TGT_COL]:
                    currTrackExp = self.__toonTrackExp(nextAttack[TOON_ID_COL], atkTrack)
                    if debug:
                        self.notify.debug('Examining toon track exp bonus: ' + str(currTrackExp))
                    trackExp = max(currTrackExp, trackExp)
        if debug:
            if atkTrack == HEAL:
                self.notify.debug('Toon attack is a heal, no target def used')
            else:
                self.notify.debug('Suit defense used for toon attack: ' + str(tgtDef))
            self.notify.debug('Toon track exp bonus used for toon attack: ' + str(trackExp))
        if attack[TOON_TRACK_COL] == NPCSOS:
            randChoice = 0
        else:
            randChoice = random.randint(0, 99)
        propAcc = AvPropAccuracy[atkTrack][atkLevel]
        if hasAccuracyBuff:
            propAcc *= BGagAccuracyMultiplier
        if atkTrack == LURE:
            # Organic (tree) and prop bonuses boost lure accuracy; whether
            # they stack is config-controlled.
            treebonus = self.__toonCheckGagBonus(attack[TOON_ID_COL], atkTrack, atkLevel)
            propBonus = self.__checkPropBonus(atkTrack)
            if self.propAndOrganicBonusStack:
                propAcc = 0
                if treebonus:
                    self.notify.debug('using organic bonus lure accuracy')
                    propAcc += AvLureBonusAccuracy[atkLevel]
                if propBonus:
                    self.notify.debug('using prop bonus lure accuracy')
                    propAcc += AvLureBonusAccuracy[atkLevel]
            elif treebonus or propBonus:
                self.notify.debug('using oragnic OR prop bonus lure accuracy')
                propAcc = AvLureBonusAccuracy[atkLevel]
        attackAcc = propAcc + trackExp + tgtDef
        # Same track + same target as the immediately preceding attack shares
        # that attack's hit/miss outcome.
        currAtk = self.toonAtkOrder.index(attackIndex)
        if currAtk > 0 and atkTrack != HEAL:
            prevAtkId = self.toonAtkOrder[currAtk - 1]
            prevAttack = self.battle.toonAttacks[prevAtkId]
            prevAtkTrack = self.__getActualTrack(prevAttack)
            lure = atkTrack == LURE and (not attackAffectsGroup(atkTrack, atkLevel, attack[TOON_TRACK_COL]) and attack[TOON_TGT_COL] in self.successfulLures or attackAffectsGroup(atkTrack, atkLevel, attack[TOON_TRACK_COL]))
            if atkTrack == prevAtkTrack and (attack[TOON_TGT_COL] == prevAttack[TOON_TGT_COL] or lure):
                if prevAttack[TOON_ACCBONUS_COL] == 1:
                    if debug:
                        self.notify.debug('DODGE: Toon attack track dodged')
                elif prevAttack[TOON_ACCBONUS_COL] == 0:
                    if debug:
                        self.notify.debug('HIT: Toon attack track hit')
                attack[TOON_ACCBONUS_COL] = prevAttack[TOON_ACCBONUS_COL]
                return (not attack[TOON_ACCBONUS_COL], attackAcc)
        atkAccResult = attackAcc
        if debug:
            self.notify.debug('setting atkAccResult to %d' % atkAccResult)
        acc = attackAcc + self.__calcToonAccBonus(attackIndex)
        if atkTrack != LURE and atkTrack != HEAL:
            if atkTrack != DROP:
                # Lured targets are free hits; partial lure scales accuracy up.
                if numLured == len(atkTargets):
                    if debug:
                        self.notify.debug('all targets are lured, attack hits')
                    attack[TOON_ACCBONUS_COL] = 0
                    return (1, 100)
                else:
                    luredRatio = float(numLured) / float(len(atkTargets))
                    accAdjust = 100 * luredRatio
                    if accAdjust > 0 and debug:
                        self.notify.debug(str(numLured) + ' out of ' + str(len(atkTargets)) + ' targets are lured, so adding ' + str(accAdjust) + ' to attack accuracy')
                    acc += accAdjust
            elif numLured == len(atkTargets):
                if debug:
                    self.notify.debug('all targets are lured, attack misses')
                attack[TOON_ACCBONUS_COL] = 0
                return (0, 0)
        if acc > MaxToonAcc:
            acc = MaxToonAcc
        if randChoice < acc:
            if debug:
                self.notify.debug('HIT: Toon attack rolled' + str(randChoice) + 'to hit with an accuracy of' + str(acc))
            attack[TOON_ACCBONUS_COL] = 0
        else:
            if debug:
                self.notify.debug('MISS: Toon attack rolled' + str(randChoice) + 'to hit with an accuracy of' + str(acc))
            attack[TOON_ACCBONUS_COL] = 1
        return (not attack[TOON_ACCBONUS_COL], atkAccResult)
def __toonTrackExp(self, toonId, track):
toon = self.battle.getToon(toonId)
if toon != None:
toonExpLvl = toon.experience.getExpLevel(track)
exp = AttackExpPerTrack[toonExpLvl]
if track == HEAL:
exp = exp * 0.5
self.notify.debug('Toon track exp: ' + str(toonExpLvl) + ' and resulting acc bonus: ' + str(exp))
return exp
else:
return 0
def __toonCheckGagBonus(self, toonId, track, level):
toon = self.battle.getToon(toonId)
if toon != None:
return toon.checkGagBonus(track, level)
else:
return False
def __checkPropBonus(self, track):
result = False
if self.battle.getInteractivePropTrackBonus() == track:
result = True
return result
def __targetDefense(self, suit, atkTrack):
if atkTrack == HEAL:
return 0
suitDef = SuitBattleGlobals.SuitAttributes[suit.dna.name]['def'][suit.getLevel()]
return -suitDef
    def __createToonTargetList(self, attackIndex):
        """Build the target list for the toon attack at *attackIndex*.

        Single-target attacks resolve TOON_TGT_COL to a toon id (heals) or a
        suit object; group heals/pet tricks target the active toons (the
        healer excluded for a player-cast heal); other group attacks target
        every active suit.  NPC SOS attacks get no targets here.
        """
        attack = self.battle.toonAttacks[attackIndex]
        atkTrack, atkLevel = self.__getActualTrackLevel(attack)
        targetList = []
        if atkTrack == NPCSOS:
            return targetList
        if not attackAffectsGroup(atkTrack, atkLevel, attack[TOON_TRACK_COL]):
            if atkTrack == HEAL:
                target = attack[TOON_TGT_COL]
            else:
                target = self.battle.findSuit(attack[TOON_TGT_COL])
            if target != None:
                targetList.append(target)
        elif atkTrack == HEAL or atkTrack == PETSOS:
            if attack[TOON_TRACK_COL] == NPCSOS or atkTrack == PETSOS:
                targetList = self.battle.activeToons
            else:
                # A toon's group heal affects everyone but the healer.
                for currToon in self.battle.activeToons:
                    if attack[TOON_ID_COL] != currToon:
                        targetList.append(currToon)
        else:
            targetList = self.battle.activeSuits
        return targetList
def __prevAtkTrack(self, attackerId, toon = 1):
if toon:
prevAtkIdx = self.toonAtkOrder.index(attackerId) - 1
if prevAtkIdx >= 0:
prevAttackerId = self.toonAtkOrder[prevAtkIdx]
attack = self.battle.toonAttacks[prevAttackerId]
return self.__getActualTrack(attack)
else:
return NO_ATTACK
    def getSuitTrapType(self, suitId):
        """Trap level waiting under *suitId*, or NO_TRAP when there is none
        (conflicting traps cancel each other out)."""
        if suitId in self.traps:
            if self.traps[suitId][0] == TRAP_CONFLICT:
                return NO_TRAP
            else:
                return self.traps[suitId][0]
        else:
            return NO_TRAP
def __suitTrapDamage(self, suitId):
if suitId in self.traps:
return self.traps[suitId][2]
else:
return 0
def addTrainTrapForJoiningSuit(self, suitId):
self.notify.debug('addTrainTrapForJoiningSuit suit=%d self.traps=%s' % (suitId, self.traps))
trapInfoToUse = None
for trapInfo in self.traps.values():
if trapInfo[0] == UBER_GAG_LEVEL_INDEX:
trapInfoToUse = trapInfo
break
if trapInfoToUse:
self.traps[suitId] = trapInfoToUse
else:
self.notify.warning('huh we did not find a train trap?')
    def __addSuitGroupTrap(self, suitId, trapLvl, attackerId, allSuits, npcDamage = 0):
        """Record a group (train) trap under *suitId*.

        A second trap on an already-trapped suit marks every suit's trap as
        TRAP_CONFLICT (they cancel).  npcDamage != 0 means an NPC placed the
        trap, which earns no toon EXP credit.
        """
        if npcDamage == 0:
            if suitId in self.traps:
                if self.traps[suitId][0] == TRAP_CONFLICT:
                    pass
                else:
                    self.traps[suitId][0] = TRAP_CONFLICT
                # Conflict propagates to every suit in the battle.
                for suit in allSuits:
                    id = suit.doId
                    if id in self.traps:
                        self.traps[id][0] = TRAP_CONFLICT
                    else:
                        self.traps[id] = [TRAP_CONFLICT, 0, 0]
            else:
                toon = self.battle.getToon(attackerId)
                organicBonus = toon.checkGagBonus(TRAP, trapLvl)
                propBonus = self.__checkPropBonus(TRAP)
                damage = getAvPropDamage(TRAP, trapLvl, toon.experience.getExp(TRAP), organicBonus, propBonus, self.propAndOrganicBonusStack)
                # Creator id is kept only when the gag earns EXP credit.
                if self.itemIsCredit(TRAP, trapLvl):
                    self.traps[suitId] = [trapLvl, attackerId, damage]
                else:
                    self.traps[suitId] = [trapLvl, 0, damage]
                self.notify.debug('calling __addLuredSuitsDelayed')
                self.__addLuredSuitsDelayed(attackerId, targetId=-1, ignoreDamageCheck=True)
        elif suitId in self.traps:
            if self.traps[suitId][0] == TRAP_CONFLICT:
                self.traps[suitId] = [trapLvl, 0, npcDamage]
        elif not self.__suitIsLured(suitId):
            self.traps[suitId] = [trapLvl, 0, npcDamage]
    def __addSuitTrap(self, suitId, trapLvl, attackerId, npcDamage = 0):
        """Record a single-suit trap under *suitId*.

        A second toon trap on the same suit becomes TRAP_CONFLICT (both
        cancel).  npcDamage != 0 means an NPC placed it: no EXP credit, and
        it may replace a conflicted trap.
        """
        if npcDamage == 0:
            if suitId in self.traps:
                if self.traps[suitId][0] == TRAP_CONFLICT:
                    pass
                else:
                    self.traps[suitId][0] = TRAP_CONFLICT
            else:
                toon = self.battle.getToon(attackerId)
                organicBonus = toon.checkGagBonus(TRAP, trapLvl)
                propBonus = self.__checkPropBonus(TRAP)
                damage = getAvPropDamage(TRAP, trapLvl, toon.experience.getExp(TRAP), organicBonus, propBonus, self.propAndOrganicBonusStack)
                # Creator id is kept only when the gag earns EXP credit.
                if self.itemIsCredit(TRAP, trapLvl):
                    self.traps[suitId] = [trapLvl, attackerId, damage]
                else:
                    self.traps[suitId] = [trapLvl, 0, damage]
        elif suitId in self.traps:
            if self.traps[suitId][0] == TRAP_CONFLICT:
                self.traps[suitId] = [trapLvl, 0, npcDamage]
        elif not self.__suitIsLured(suitId):
            self.traps[suitId] = [trapLvl, 0, npcDamage]
def __removeSuitTrap(self, suitId):
if suitId in self.traps:
del self.traps[suitId]
def __clearTrapCreator(self, creatorId, suitId = None):
if suitId == None:
for currTrap in self.traps.keys():
if creatorId == self.traps[currTrap][1]:
self.traps[currTrap][1] = 0
elif suitId in self.traps:
self.traps[suitId][1] = 0
def __trapCreator(self, suitId):
if suitId in self.traps:
return self.traps[suitId][1]
else:
return 0
    def __initTraps(self):
        """Start-of-round trap reset: clear the train-trap flag and drop any
        conflicted traps."""
        self.trainTrapTriggered = False
        # Snapshot the keys (Python 2 keys() returns a list) so entries can
        # be deleted while iterating.
        keysList = self.traps.keys()
        for currTrap in keysList:
            if self.traps[currTrap][0] == TRAP_CONFLICT:
                del self.traps[currTrap]
    def __calcToonAtkHp(self, toonId):
        """Resolve the toon attack owned by *toonId* against its targets.

        Runs the hit roll, then per-target computes healing/damage (with
        special handling for lure-onto-trap, fire, squirt wetness, zap and
        drop-vs-lured interactions), writes results into the attack's
        TOON_HP_COL / TOON_KBBONUS_COL entries, and hands out trap/lure EXP
        credit.  The attack is cleared when no valid target remains.
        """
        attack = self.battle.toonAttacks[toonId]
        targetList = self.__createToonTargetList(toonId)
        atkHit, atkAcc = self.__calcToonAtkHit(toonId, targetList)
        atkTrack, atkLevel, atkHp = self.__getActualTrackLevelHp(attack)
        if not atkHit and atkTrack != HEAL:
            return
        validTargetAvail = 0
        lureDidDamage = 0
        currLureId = -1
        for currTarget in xrange(len(targetList)):
            attackLevel = -1
            attackTrack = None
            attackDamage = 0
            toonTarget = 0
            targetLured = 0
            if atkTrack == HEAL or atkTrack == PETSOS:
                targetId = targetList[currTarget]
                toonTarget = 1
            else:
                targetId = targetList[currTarget].getDoId()
            if atkTrack == LURE:
                if self.getSuitTrapType(targetId) == NO_TRAP:
                    # No trap under the suit: just lure it for some rounds.
                    if self.notify.getDebug():
                        self.notify.debug('Suit lured, but no trap exists')
                    if SUITS_UNLURED_IMMEDIATELY:
                        if not self.__suitIsLured(targetId, prevRound=1):
                            if not self.__combatantDead(targetId, toon=toonTarget):
                                validTargetAvail = 1
                            rounds = NumRoundsLured[atkLevel]
                            wakeupChance = 100 - atkAcc * 2
                            npcLurer = attack[TOON_TRACK_COL] == NPCSOS
                            currLureId = self.__addLuredSuitInfo(targetId, -1, rounds, wakeupChance, toonId, atkLevel, lureId=currLureId, npc=npcLurer)
                            if self.notify.getDebug():
                                self.notify.debug('Suit lured for ' + str(rounds) + ' rounds max with ' + str(wakeupChance) + '% chance to wake up each round')
                            targetLured = 1
                else:
                    # Lured straight onto a trap: the trap fires and its
                    # creator earns the trap EXP.
                    attackTrack = TRAP
                    if targetId in self.traps:
                        trapInfo = self.traps[targetId]
                        attackLevel = trapInfo[0]
                    else:
                        attackLevel = NO_TRAP
                    attackDamage = self.__suitTrapDamage(targetId)
                    trapCreatorId = self.__trapCreator(targetId)
                    if trapCreatorId > 0:
                        self.notify.debug('Giving trap EXP to toon ' + str(trapCreatorId))
                        self.__addAttackExp(attack, track=TRAP, level=attackLevel, attackerId=trapCreatorId)
                    self.__clearTrapCreator(trapCreatorId, targetId)
                    lureDidDamage = 1
                    if self.notify.getDebug():
                        self.notify.debug('Suit lured right onto a trap! (' + str(AvProps[attackTrack][attackLevel]) + ',' + str(attackLevel) + ')')
                    if not self.__combatantDead(targetId, toon=toonTarget):
                        validTargetAvail = 1
                    targetLured = 1
                if not SUITS_UNLURED_IMMEDIATELY:
                    if not self.__suitIsLured(targetId, prevRound=1):
                        if not self.__combatantDead(targetId, toon=toonTarget):
                            validTargetAvail = 1
                        rounds = NumRoundsLured[atkLevel]
                        wakeupChance = 100 - atkAcc * 2
                        npcLurer = attack[TOON_TRACK_COL] == NPCSOS
                        currLureId = self.__addLuredSuitInfo(targetId, -1, rounds, wakeupChance, toonId, atkLevel, lureId=currLureId, npc=npcLurer)
                        if self.notify.getDebug():
                            self.notify.debug('Suit lured for ' + str(rounds) + ' rounds max with ' + str(wakeupChance) + '% chance to wake up each round')
                        targetLured = 1
                if attackLevel != -1:
                    self.__addLuredSuitsDelayed(toonId, targetId)
                # Remember the strongest successful lure per target.
                if targetLured and (not targetId in self.successfulLures or targetId in self.successfulLures and self.successfulLures[targetId][1] < atkLevel):
                    self.notify.debug('Adding target ' + str(targetId) + ' to successfulLures list')
                    self.successfulLures[targetId] = [toonId,
                     atkLevel,
                     atkAcc,
                     -1]
            else:
                if atkTrack == TRAP:
                    npcDamage = 0
                    if attack[TOON_TRACK_COL] == NPCSOS:
                        npcDamage = atkHp
                    if CLEAR_MULTIPLE_TRAPS:
                        if self.getSuitTrapType(targetId) != NO_TRAP:
                            self.__clearAttack(toonId)
                            return
                    if atkLevel == UBER_GAG_LEVEL_INDEX:
                        # Level-7 trap (train) is a group trap.
                        self.__addSuitGroupTrap(targetId, atkLevel, toonId, targetList, npcDamage)
                        if self.__suitIsLured(targetId):
                            self.notify.debug('Train Trap on lured suit %d, \n indicating with KBBONUS_COL flag' % targetId)
                            tgtPos = self.battle.activeSuits.index(targetList[currTarget])
                            attack[TOON_KBBONUS_COL][tgtPos] = KBBONUS_LURED_FLAG
                    else:
                        self.__addSuitTrap(targetId, atkLevel, toonId, npcDamage)
                elif self.__suitIsLured(targetId) and atkTrack == SOUND:
                    self.notify.debug('Sound on lured suit, ' + 'indicating with KBBONUS_COL flag')
                    tgtPos = self.battle.activeSuits.index(targetList[currTarget])
                    attack[TOON_KBBONUS_COL][tgtPos] = KBBONUS_LURED_FLAG
                attackLevel = atkLevel
                attackTrack = atkTrack
                toon = self.battle.getToon(toonId)
                if attack[TOON_TRACK_COL] == NPCSOS and lureDidDamage != 1 or attack[TOON_TRACK_COL] == PETSOS:
                    attackDamage = atkHp
                elif atkTrack == FIRE:
                    # Fire consumes a pink slip and removes the suit outright.
                    suit = self.battle.findSuit(targetId)
                    if suit:
                        costToFire = 1
                        abilityToFire = toon.getPinkSlips()
                        numLeft = abilityToFire - costToFire
                        if numLeft < 0:
                            numLeft = 0
                        toon.b_setPinkSlips(numLeft)
                        if costToFire > abilityToFire:
                            simbase.air.writeServerEvent('suspicious', avId=toonId, issue='Toon attempting to fire a %s cost cog with %s pinkslips' % (costToFire, abilityToFire))
                            print 'Not enough PinkSlips to fire cog - print a warning here'
                        else:
                            suit.skeleRevives = 0
                            attackDamage = suit.getHP()
                    else:
                        attackDamage = 0
                    bonus = 0
                elif atkTrack == SQUIRT:
                    # Squirt marks the target wet, enabling zap synergy.
                    if targetId not in self.currentlyWetSuits:
                        self.currentlyWetSuits.append(targetId)
                    organicBonus = toon.checkGagBonus(attackTrack, attackLevel)
                    propBonus = self.__checkPropBonus(attackTrack)
                    attackDamage = getAvPropDamage(attackTrack, attackLevel, toon.experience.getExp(attackTrack), organicBonus, propBonus, self.propAndOrganicBonusStack)
                elif atkTrack == ZAP:
                    organicBonus = toon.checkGagBonus(attackTrack, attackLevel)
                    propBonus = self.__checkPropBonus(attackTrack)
                    if self.__isWet(targetId) == 1:
                        # Wet targets: chance of an instant kill (capped at
                        # 500 damage for big suits), otherwise double damage.
                        if random.randint(0,99) <= InstaKillChance[atkLevel]:
                            suit = self.battle.findSuit(targetId)
                            if suit.getHP() > 500:
                                attackDamage = 500
                            else:
                                suit.b_setSkeleRevives(0)
                                attackDamage = suit.getHP()
                        else:
                            attackDamage = getAvPropDamage(attackTrack, attackLevel, toon.experience.getExp(attackTrack), organicBonus, propBonus, self.propAndOrganicBonusStack) * 2
                    else:
                        attackDamage = getAvPropDamage(attackTrack, attackLevel, toon.experience.getExp(attackTrack), organicBonus, propBonus, self.propAndOrganicBonusStack)
                else:
                    organicBonus = toon.checkGagBonus(attackTrack, attackLevel)
                    propBonus = self.__checkPropBonus(attackTrack)
                    attackDamage = getAvPropDamage(attackTrack, attackLevel, toon.experience.getExp(attackTrack), organicBonus, propBonus, self.propAndOrganicBonusStack)
                if not self.__combatantDead(targetId, toon=toonTarget):
                    if self.__suitIsLured(targetId) and atkTrack == DROP:
                        self.notify.debug('not setting validTargetAvail, since drop on a lured suit')
                    else:
                        validTargetAvail = 1
            if attackLevel == -1 and not atkTrack == FIRE:
                result = LURE_SUCCEEDED
            elif atkTrack != TRAP:
                result = attackDamage
                if atkTrack == HEAL:
                    # A missed heal still lands at 20% strength.
                    if not self.__attackHasHit(attack, suit=0):
                        result = result * 0.2
                    if self.notify.getDebug():
                        self.notify.debug('toon does ' + str(result) + ' healing to toon(s)')
                else:
                    if self.__suitIsLured(targetId) and atkTrack == DROP:
                        result = 0
                        self.notify.debug('setting damage to 0, since drop on a lured suit')
                    if self.notify.getDebug():
                        self.notify.debug('toon does ' + str(result) + ' damage to suit')
            else:
                result = 0
            if result != 0 or atkTrack == PETSOS:
                targets = self.__getToonTargets(attack)
                if targetList[currTarget] not in targets:
                    if self.notify.getDebug():
                        self.notify.debug('Target of toon is not accessible!')
                    continue
                targetIndex = targets.index(targetList[currTarget])
                if atkTrack == HEAL:
                    # Group heals split evenly across targets.
                    result = result / len(targetList)
                    if self.notify.getDebug():
                        self.notify.debug('Splitting heal among ' + str(len(targetList)) + ' targets')
                if targetId in self.successfulLures and atkTrack == LURE:
                    self.notify.debug('Updating lure damage to ' + str(result))
                    self.successfulLures[targetId][3] = result
                else:
                    attack[TOON_HP_COL][targetIndex] = result
                if result > 0 and atkTrack != HEAL and atkTrack != DROP and atkTrack != PETSOS:
                    # Damaging a lured suit cashes in the lurers' EXP credit.
                    attackTrack = LURE
                    lureInfos = self.__getLuredExpInfo(targetId)
                    for currInfo in lureInfos:
                        if currInfo[3]:
                            self.notify.debug('Giving lure EXP to toon ' + str(currInfo[0]))
                            self.__addAttackExp(attack, track=attackTrack, level=currInfo[1], attackerId=currInfo[0])
                        self.__clearLurer(currInfo[0], lureId=currInfo[2])
        if lureDidDamage:
            if self.itemIsCredit(atkTrack, atkLevel):
                self.notify.debug('Giving lure EXP to toon ' + str(toonId))
                self.__addAttackExp(attack)
        if not validTargetAvail and self.__prevAtkTrack(toonId) != atkTrack:
            self.__clearAttack(toonId)
def __getToonTargets(self, attack):
track = self.__getActualTrack(attack)
if track == HEAL or track == PETSOS:
return self.battle.activeToons
else:
return self.battle.activeSuits
def __attackHasHit(self, attack, suit = 0):
if suit == 1:
for dmg in attack[SUIT_HP_COL]:
if dmg > 0:
return 1
return 0
else:
track = self.__getActualTrack(attack)
return not attack[TOON_ACCBONUS_COL] and track != NO_ATTACK
def __attackDamage(self, attack, suit = 0):
if suit:
for dmg in attack[SUIT_HP_COL]:
if dmg > 0:
return dmg
return 0
else:
for dmg in attack[TOON_HP_COL]:
if dmg > 0:
return dmg
return 0
def __isWet(self, suit):
if suit in self.currentlyWetSuits:
return 1
else:
return 0
def __attackDamageForTgt(self, attack, tgtPos, suit = 0):
if suit:
return attack[SUIT_HP_COL][tgtPos]
else:
return attack[TOON_HP_COL][tgtPos]
    def __calcToonAccBonus(self, attackKey):
        """Accuracy bonus for the attack keyed by *attackKey*, based on how
        many earlier attacks this round hit on a different track while
        sharing the target (or being group attacks)."""
        numPrevHits = 0
        attackIdx = self.toonAtkOrder.index(attackKey)
        for currPrevAtk in xrange(attackIdx - 1, -1, -1):
            # NOTE(review): this re-fetches the *current* attack each
            # iteration (attackKey is constant), not the previous one.
            attack = self.battle.toonAttacks[attackKey]
            atkTrack, atkLevel = self.__getActualTrackLevel(attack)
            prevAttackKey = self.toonAtkOrder[currPrevAtk]
            prevAttack = self.battle.toonAttacks[prevAttackKey]
            prvAtkTrack, prvAtkLevel = self.__getActualTrackLevel(prevAttack)
            if self.__attackHasHit(prevAttack) and (attackAffectsGroup(prvAtkTrack, prvAtkLevel, prevAttack[TOON_TRACK_COL]) or attackAffectsGroup(atkTrack, atkLevel, attack[TOON_TRACK_COL]) or attack[TOON_TGT_COL] == prevAttack[TOON_TGT_COL]) and atkTrack != prvAtkTrack:
                numPrevHits += 1
        if numPrevHits > 0 and self.notify.getDebug():
            self.notify.debug('ACC BONUS: toon attack received accuracy ' + 'bonus of ' + str(AccuracyBonuses[numPrevHits]) + ' from previous attack by (' + str(attack[TOON_ID_COL]) + ') which hit')
        return AccuracyBonuses[numPrevHits]
    def __applyToonAttackDamages(self, toonId, hpbonus = 0, kbbonus = 0):
        """Apply the already-computed results of *toonId*'s attack.

        Depending on the flags, applies the base damage column, the HP-bonus
        column, or the knockback-bonus column.  Heals/pet tricks adjust toon
        HP (capped at max HP when CAP_HEALS); everything else subtracts suit
        HP, consuming skelecog revives or removing dead suits and recording
        the outcome in the attack's SUIT_REVIVE_COL / SUIT_DIED_COL bitmasks.
        Returns the total amount applied.
        """
        totalDamages = 0
        if not APPLY_HEALTH_ADJUSTMENTS:
            return totalDamages
        attack = self.battle.toonAttacks[toonId]
        track = self.__getActualTrack(attack)
        if track != NO_ATTACK and track != SOS and track != TRAP and track != NPCSOS:
            targets = self.__getToonTargets(attack)
            for position in xrange(len(targets)):
                if hpbonus:
                    if targets[position] in self.__createToonTargetList(toonId):
                        damageDone = attack[TOON_HPBONUS_COL]
                    else:
                        damageDone = 0
                elif kbbonus:
                    if targets[position] in self.__createToonTargetList(toonId):
                        damageDone = attack[TOON_KBBONUS_COL][position]
                    else:
                        damageDone = 0
                else:
                    damageDone = attack[TOON_HP_COL][position]
                if damageDone <= 0 or self.immortalSuits:
                    continue
                if track == HEAL or track == PETSOS:
                    currTarget = targets[position]
                    if CAP_HEALS:
                        # Clamp the heal so the toon never exceeds max HP.
                        toonHp = self.__getToonHp(currTarget)
                        toonMaxHp = self.__getToonMaxHp(currTarget)
                        if toonHp + damageDone > toonMaxHp:
                            damageDone = toonMaxHp - toonHp
                            attack[TOON_HP_COL][position] = damageDone
                    self.toonHPAdjusts[currTarget] += damageDone
                    totalDamages = totalDamages + damageDone
                    continue
                currTarget = targets[position]
                currTarget.setHP(currTarget.getHP() - damageDone)
                targetId = currTarget.getDoId()
                if self.notify.getDebug():
                    if hpbonus:
                        self.notify.debug(str(targetId) + ': suit takes ' + str(damageDone) + ' damage from HP-Bonus')
                    elif kbbonus:
                        self.notify.debug(str(targetId) + ': suit takes ' + str(damageDone) + ' damage from KB-Bonus')
                    else:
                        self.notify.debug(str(targetId) + ': suit takes ' + str(damageDone) + ' damage')
                totalDamages = totalDamages + damageDone
                if currTarget.getHP() <= 0:
                    # A skelecog revive saves the suit once; otherwise it dies.
                    if currTarget.getSkeleRevives() >= 1:
                        currTarget.useSkeleRevive()
                        attack[SUIT_REVIVE_COL] = attack[SUIT_REVIVE_COL] | 1 << position
                    else:
                        self.suitLeftBattle(targetId)
                        attack[SUIT_DIED_COL] = attack[SUIT_DIED_COL] | 1 << position
                        if self.notify.getDebug():
                            self.notify.debug('Suit' + str(targetId) + 'bravely expired in combat')
        return totalDamages
def __combatantDead(self, avId, toon):
    """Return 1 when the combatant (toon if *toon* is truthy, else suit) has no HP."""
    if toon:
        return 1 if self.__getToonHp(avId) <= 0 else 0
    suit = self.battle.findSuit(avId)
    return 1 if suit.getHP() <= 0 else 0
def __combatantJustRevived(self, avId):
    """Return 1 when the suit consumed its skelecog revive this round (flag is cleared)."""
    suit = self.battle.findSuit(avId)
    return 1 if suit.reviveCheckAndClear() else 0
def __addAttackExp(self, attack, track = -1, level = -1, attackerId = -1):
    """Credit skill points for an attack that hit.

    Uses the explicit track/level/attackerId when all three are supplied;
    otherwise derives them from *attack* if it hit. Exp per track is capped
    at ExperienceCap.
    """
    trk = -1
    lvl = -1
    # Renamed from `id`, which shadowed the builtin.
    toonId = -1
    if track != -1 and level != -1 and attackerId != -1:
        trk = track
        lvl = level
        toonId = attackerId
    elif self.__attackHasHit(attack):
        if self.notify.getDebug():
            self.notify.debug('Attack ' + repr(attack) + ' has hit')
        trk = attack[TOON_TRACK_COL]
        lvl = attack[TOON_LVL_COL]
        toonId = attack[TOON_ID_COL]
    if trk != -1 and trk != NPCSOS and trk != PETSOS and lvl != -1 and toonId != -1:
        # One exp bucket per gag track; create the 8-track list lazily.
        expList = self.toonSkillPtsGained.setdefault(toonId, [0] * 8)
        expList[trk] = min(ExperienceCap, expList[trk] + (lvl + 1) * self.__skillCreditMultiplier)
def __clearTgtDied(self, tgt, lastAtk, currAtk):
    """Move the 'suit died' credit from lastAtk to currAtk when a later
    same-track attack also hit the suit, so the kill plays on the final hit."""
    position = self.battle.activeSuits.index(tgt)
    currAtkTrack = self.__getActualTrack(currAtk)
    lastAtkTrack = self.__getActualTrack(lastAtk)
    if currAtkTrack == lastAtkTrack and lastAtk[SUIT_DIED_COL] & 1 << position and self.__attackHasHit(currAtk, suit=0):
        if self.notify.getDebug():
            self.notify.debug('Clearing suit died for ' + str(tgt.getDoId()) + ' at position ' + str(position) + ' from toon attack ' + str(lastAtk[TOON_ID_COL]) + ' and setting it for ' + str(currAtk[TOON_ID_COL]))
        # XOR clears the died bit on the earlier attack; OR sets it on this one.
        lastAtk[SUIT_DIED_COL] = lastAtk[SUIT_DIED_COL] ^ 1 << position
        self.suitLeftBattle(tgt.getDoId())
        currAtk[SUIT_DIED_COL] = currAtk[SUIT_DIED_COL] | 1 << position
def __addDmgToBonuses(self, dmg, attackIndex, hp = 1):
    """Record *dmg* in the HP-bonus (or knockback-bonus) table for each target.

    Knockback entries are only recorded against suits that are currently lured.
    """
    toonId = self.toonAtkOrder[attackIndex]
    attack = self.battle.toonAttacks[toonId]
    atkTrack = self.__getActualTrack(attack)
    if atkTrack == HEAL or atkTrack == PETSOS:
        return
    for currTgt in self.__createToonTargetList(toonId):
        tgtPos = self.battle.activeSuits.index(currTgt)
        attackerId = self.toonAtkOrder[attackIndex]
        attack = self.battle.toonAttacks[attackerId]
        track = self.__getActualTrack(attack)
        if hp:
            self.hpBonuses[tgtPos].setdefault(track, []).append([attackIndex, dmg])
        elif self.__suitIsLured(currTgt.getDoId()):
            self.kbBonuses[tgtPos].setdefault(track, []).append([attackIndex, dmg])
def __clearBonuses(self, hp = 1):
    """Reset the four per-suit-position bonus dicts (HP table by default)."""
    fresh = [{} for _ in range(4)]
    if hp:
        self.hpBonuses = fresh
    else:
        self.kbBonuses = fresh
def __bonusExists(self, tgtSuit, hp = 1):
    """Return 1 when any bonus entries exist for *tgtSuit*'s position."""
    # NOTE(review): indexes self.activeSuits, while every sibling method uses
    # self.battle.activeSuits — confirm the calculator really has this attribute.
    tgtPos = self.activeSuits.index(tgtSuit)
    table = self.hpBonuses if hp else self.kbBonuses
    return 1 if len(table[tgtPos]) > 0 else 0
def __processBonuses(self, hp = 1):
    """Convert accumulated multi-hit records into HP-bonus / knockback-bonus
    values on the last contributing attack, then clear the table."""
    if hp:
        bonusList = self.hpBonuses
        self.notify.debug('Processing hpBonuses: ' + repr(self.hpBonuses))
    else:
        bonusList = self.kbBonuses
        self.notify.debug('Processing kbBonuses: ' + repr(self.kbBonuses))
    tgtPos = 0
    for currTgt in bonusList:
        for currAtkType in currTgt.keys():
            # HP bonuses need 2+ hits of the same track; kb bonuses need 1+.
            if len(currTgt[currAtkType]) > 1 or not hp and len(currTgt[currAtkType]) > 0:
                totalDmgs = 0
                for currDmg in currTgt[currAtkType]:
                    totalDmgs += currDmg[1]
                numDmgs = len(currTgt[currAtkType])
                # Credit the bonus to the last attack of the run.
                attackIdx = currTgt[currAtkType][numDmgs - 1][0]
                attackerId = self.toonAtkOrder[attackIdx]
                attack = self.battle.toonAttacks[attackerId]
                if hp:
                    attack[TOON_HPBONUS_COL] = math.ceil(totalDmgs * (DamageBonuses[numDmgs - 1] * 0.01))
                    if self.notify.getDebug():
                        self.notify.debug('Applying hp bonus to track ' + str(attack[TOON_TRACK_COL]) + ' of ' + str(attack[TOON_HPBONUS_COL]))
                elif len(attack[TOON_KBBONUS_COL]) > tgtPos:
                    attack[TOON_KBBONUS_COL][tgtPos] = totalDmgs * 0.5
                    if self.notify.getDebug():
                        self.notify.debug('Applying kb bonus to track ' + str(attack[TOON_TRACK_COL]) + ' of ' + str(attack[TOON_KBBONUS_COL][tgtPos]) + ' to target ' + str(tgtPos))
                else:
                    self.notify.warning('invalid tgtPos for knock back bonus: %d' % tgtPos)
        tgtPos += 1
    if hp:
        self.__clearBonuses()
    else:
        self.__clearBonuses(hp=0)
def __handleBonus(self, attackIdx, hp = 1):
    """Feed this attack's damage into the HP- or knockback-bonus accumulator."""
    attackerId = self.toonAtkOrder[attackIdx]
    attack = self.battle.toonAttacks[attackerId]
    atkDmg = self.__attackDamage(attack, suit=0)
    atkTrack = self.__getActualTrack(attack)
    if atkDmg <= 0:
        return
    if hp:
        if atkTrack != LURE:
            self.notify.debug('Adding dmg of ' + str(atkDmg) + ' to hpBonuses list')
            self.__addDmgToBonuses(atkDmg, attackIdx)
    elif self.__knockBackAtk(attackerId, toon=1):
        self.notify.debug('Adding dmg of ' + str(atkDmg) + ' to kbBonuses list')
        self.__addDmgToBonuses(atkDmg, attackIdx, hp=0)
def __clearAttack(self, attackIdx, toon = 1):
    """Reset a toon's attack entry to the default and re-pad its HP columns.

    Only the toon variant is implemented; the suit variant logs a warning.
    """
    if not toon:
        self.notify.warning('__clearAttack not implemented for suits!')
        return
    if self.notify.getDebug():
        self.notify.debug('clearing out toon attack for toon ' + str(attackIdx) + '...')
    # Removed an unused local (`attack`) that read the old entry and was never used.
    self.battle.toonAttacks[attackIdx] = getToonAttack(attackIdx)
    longest = max(len(self.battle.activeToons), len(self.battle.activeSuits))
    taList = self.battle.toonAttacks
    # Pad both per-target columns with -1 sentinels, one per possible target.
    taList[attackIdx][TOON_HP_COL].extend([-1] * longest)
    taList[attackIdx][TOON_KBBONUS_COL].extend([-1] * longest)
    if self.notify.getDebug():
        self.notify.debug('toon attack is now ' + repr(self.battle.toonAttacks[attackIdx]))
def __rememberToonAttack(self, suitId, toonId, damage):
    """Track the strongest hit each toon has landed on each suit.

    SuitAttackers maps suitId -> {toonId: bestDamage}; suits later use it
    to pick retaliation targets weighted by damage received.
    """
    if not suitId in self.SuitAttackers:
        self.SuitAttackers[suitId] = {toonId: damage}
    elif not toonId in self.SuitAttackers[suitId]:
        self.SuitAttackers[suitId][toonId] = damage
    elif self.SuitAttackers[suitId][toonId] <= damage:
        # BUG FIX: was `self.SuitAttackers[suitId] = [toonId, damage]`, which
        # replaced the per-suit dict with a list and broke every later
        # dict-style lookup (__calcSuitTarget, toonLeftBattle, ...).
        self.SuitAttackers[suitId][toonId] = damage
def __postProcessToonAttacks(self):
    """Final pass over the toon attacks in execution order: record hits,
    trigger traps/lures, clear attacks whose targets all died, apply the
    damage/bonus columns, and credit skill exp."""
    self.notify.debug('__postProcessToonAttacks()')
    lastTrack = -1
    lastAttacks = []
    self.__clearBonuses()
    for currToonAttack in self.toonAtkOrder:
        if currToonAttack != -1:
            attack = self.battle.toonAttacks[currToonAttack]
            atkTrack, atkLevel = self.__getActualTrackLevel(attack)
            if atkTrack != HEAL and atkTrack != SOS and atkTrack != NO_ATTACK and atkTrack != NPCSOS and atkTrack != PETSOS:
                targets = self.__createToonTargetList(currToonAttack)
                allTargetsDead = 1
                for currTgt in targets:
                    damageDone = self.__attackDamage(attack, suit=0)
                    if damageDone > 0:
                        # Remember who hurt this suit, for retaliation targeting.
                        self.__rememberToonAttack(currTgt.getDoId(), attack[TOON_ID_COL], damageDone)
                    if atkTrack == TRAP:
                        if currTgt.doId in self.traps:
                            trapInfo = self.traps[currTgt.doId]
                            currTgt.battleTrap = trapInfo[0]
                    targetDead = 0
                    if currTgt.getHP() > 0:
                        allTargetsDead = 0
                    else:
                        targetDead = 1
                        if atkTrack != LURE:
                            # Hand the kill credit to the latest hitting attack.
                            for currLastAtk in lastAttacks:
                                self.__clearTgtDied(currTgt, currLastAtk, attack)
                    tgtId = currTgt.getDoId()
                    if tgtId in self.successfulLures and atkTrack == LURE:
                        lureInfo = self.successfulLures[tgtId]
                        self.notify.debug('applying lure data: ' + repr(lureInfo))
                        toonId = lureInfo[0]
                        lureAtk = self.battle.toonAttacks[toonId]
                        tgtPos = self.battle.activeSuits.index(currTgt)
                        if currTgt.doId in self.traps:
                            # Luring onto a trap consumes it (train traps fire globally).
                            trapInfo = self.traps[currTgt.doId]
                            if trapInfo[0] == UBER_GAG_LEVEL_INDEX:
                                self.notify.debug('train trap triggered for %d' % currTgt.doId)
                                self.trainTrapTriggered = True
                            self.__removeSuitTrap(tgtId)
                        lureAtk[TOON_KBBONUS_COL][tgtPos] = KBBONUS_TGT_LURED
                        lureAtk[TOON_HP_COL][tgtPos] = lureInfo[3]
                    elif self.__suitIsLured(tgtId) and atkTrack == DROP:
                        self.notify.debug('Drop on lured suit, ' + 'indicating with KBBONUS_COL ' + 'flag')
                        tgtPos = self.battle.activeSuits.index(currTgt)
                        attack[TOON_KBBONUS_COL][tgtPos] = KBBONUS_LURED_FLAG
                    if targetDead and atkTrack != lastTrack:
                        tgtPos = self.battle.activeSuits.index(currTgt)
                        attack[TOON_HP_COL][tgtPos] = 0
                        attack[TOON_KBBONUS_COL][tgtPos] = -1
                if allTargetsDead and atkTrack != lastTrack:
                    if self.notify.getDebug():
                        self.notify.debug('all targets of toon attack ' + str(currToonAttack) + ' are dead')
                    self.__clearAttack(currToonAttack, toon=1)
                    attack = self.battle.toonAttacks[currToonAttack]
                    atkTrack, atkLevel = self.__getActualTrackLevel(attack)
            damagesDone = self.__applyToonAttackDamages(currToonAttack)
            self.__applyToonAttackDamages(currToonAttack, hpbonus=1)
            if atkTrack != LURE and atkTrack != DROP and atkTrack != SOUND:
                self.__applyToonAttackDamages(currToonAttack, kbbonus=1)
            if lastTrack != atkTrack:
                lastAttacks = []
                lastTrack = atkTrack
            lastAttacks.append(attack)
            if self.itemIsCredit(atkTrack, atkLevel):
                if atkTrack == TRAP or atkTrack == LURE:
                    pass
                elif atkTrack == HEAL:
                    # Heals only earn exp when they actually healed something.
                    if damagesDone != 0:
                        self.__addAttackExp(attack)
                else:
                    self.__addAttackExp(attack)
    if self.trainTrapTriggered:
        # A triggered train trap clears every remaining trap in the battle.
        for suit in self.battle.activeSuits:
            suitId = suit.doId
            self.__removeSuitTrap(suitId)
            suit.battleTrap = NO_TRAP
            self.notify.debug('train trap triggered, removing trap from %d' % suitId)
    if self.notify.getDebug():
        for currToonAttack in self.toonAtkOrder:
            attack = self.battle.toonAttacks[currToonAttack]
            self.notify.debug('Final Toon attack: ' + str(attack))
def __allTargetsDead(self, attackIdx, toon = 1):
    """Return 1 when every target of the toon attack is already defeated."""
    allTargetsDead = 1
    if toon:
        for currTgt in self.__createToonTargetList(attackIdx):
            # BUG FIX: targets here are suits, whose accessor is getHP()
            # (used throughout this class); getHp() raised AttributeError.
            if currTgt.getHP() > 0:
                allTargetsDead = 0
                break
    else:
        self.notify.warning('__allTargetsDead: suit ver. not implemented!')
    return allTargetsDead
def __clearLuredSuitsByAttack(self, toonId, kbBonusReq = 0, targetId = -1):
    """Wake the lured suits hit by this toon's attack.

    When *targetId* is given only that suit is considered; otherwise every
    target of the toon's attack is, optionally requiring a pending
    knockback bonus (*kbBonusReq*).
    """
    if self.notify.getDebug():
        self.notify.debug('__clearLuredSuitsByAttack')
    if targetId != -1:
        # BUG FIX: this branch referenced the loop variable `t` from the other
        # branch (guaranteed NameError); it must act on the explicit targetId.
        if self.__suitIsLured(targetId):
            self.__removeLured(targetId)
    else:
        tgtList = self.__createToonTargetList(toonId)
        for t in tgtList:
            if self.__suitIsLured(t.getDoId()) and (not kbBonusReq or self.__bonusExists(t, hp=0)):
                self.__removeLured(t.getDoId())
                if self.notify.getDebug():
                    self.notify.debug('Suit %d stepping from lured spot' % t.getDoId())
            else:
                self.notify.debug('Suit ' + str(t.getDoId()) + ' not found in currently lured suits')
def __clearLuredSuitsDelayed(self):
    """Wake every suit queued for delayed unluring, then empty the queue."""
    if self.notify.getDebug():
        self.notify.debug('__clearLuredSuitsDelayed')
    for suitId in self.delayedUnlures:
        if not self.__suitIsLured(suitId):
            self.notify.debug('Suit ' + str(suitId) + ' not found in currently lured suits')
            continue
        self.__removeLured(suitId)
        if self.notify.getDebug():
            self.notify.debug('Suit %d stepping back from lured spot' % suitId)
    self.delayedUnlures = []
def __addLuredSuitsDelayed(self, toonId, targetId = -1, ignoreDamageCheck = False):
    """Queue lured suits hit by this toon (or one explicit target) to wake later."""
    if self.notify.getDebug():
        self.notify.debug('__addLuredSuitsDelayed')
    if targetId != -1:
        self.delayedUnlures.append(targetId)
        return
    for tgt in self.__createToonTargetList(toonId):
        suitId = tgt.getDoId()
        if not self.__suitIsLured(suitId) or suitId in self.delayedUnlures:
            continue
        dealtDamage = self.__attackDamageForTgt(self.battle.toonAttacks[toonId], self.battle.activeSuits.index(tgt), suit=0) > 0
        if dealtDamage or ignoreDamageCheck:
            self.delayedUnlures.append(suitId)
def __calculateToonAttacks(self):
    """Resolve every toon attack this round in order: compute hit/damage,
    accumulate bonuses, manage lure wakeups, then post-process results."""
    self.notify.debug('__calculateToonAttacks()')
    self.__clearBonuses(hp=0)
    currTrack = None
    self.notify.debug('Traps: ' + str(self.traps))
    maxSuitLevel = 0
    for cog in self.battle.activeSuits:
        maxSuitLevel = max(maxSuitLevel, cog.getActualLevel())
    # Gags at or above the highest suit level earn no skill credit.
    self.creditLevel = maxSuitLevel
    for toonId in self.toonAtkOrder:
        if self.__combatantDead(toonId, toon=1):
            if self.notify.getDebug():
                self.notify.debug("Toon %d is dead and can't attack" % toonId)
            continue
        attack = self.battle.toonAttacks[toonId]
        atkTrack = self.__getActualTrack(attack)
        if atkTrack != NO_ATTACK and atkTrack != SOS and atkTrack != NPCSOS:
            if self.notify.getDebug():
                self.notify.debug('Calculating attack for toon: %d' % toonId)
            if SUITS_UNLURED_IMMEDIATELY:
                # Track change means the previous track's delayed unlures fire now.
                if currTrack and atkTrack != currTrack:
                    self.__clearLuredSuitsDelayed()
            currTrack = atkTrack
            self.__calcToonAtkHp(toonId)
            attackIdx = self.toonAtkOrder.index(toonId)
            self.__handleBonus(attackIdx, hp=0)
            self.__handleBonus(attackIdx, hp=1)
            lastAttack = self.toonAtkOrder.index(toonId) >= len(self.toonAtkOrder) - 1
            unlureAttack = self.__attackHasHit(attack, suit=0) and self.__unlureAtk(toonId, toon=1)
            if unlureAttack:
                if lastAttack:
                    self.__clearLuredSuitsByAttack(toonId)
                else:
                    self.__addLuredSuitsDelayed(toonId)
            if lastAttack:
                self.__clearLuredSuitsDelayed()
    self.__processBonuses(hp=0)
    self.__processBonuses(hp=1)
    self.__postProcessToonAttacks()
def __knockBackAtk(self, attackIndex, toon = 1):
    """Return 1 when the toon attack is a throw or squirt (knocks lured suits back)."""
    if toon:
        track = self.battle.toonAttacks[attackIndex][TOON_TRACK_COL]
        if track == THROW or track == SQUIRT:
            if self.notify.getDebug():
                self.notify.debug('attack is a knockback')
            return 1
    return 0
def __unlureAtk(self, attackIndex, toon = 1):
    """Return 1 when the attack's actual track wakes lured suits."""
    attack = self.battle.toonAttacks[attackIndex]
    track = self.__getActualTrack(attack)
    if toon and track in (THROW, SQUIRT, SOUND, ZAP):
        if self.notify.getDebug():
            self.notify.debug('attack is an unlure')
        return 1
    return 0
def __calcSuitAtkType(self, attackIndex):
    """Pick an attack id for the suit at *attackIndex* from its DNA's attack set."""
    suit = self.battle.activeSuits[attackIndex]
    available = SuitBattleGlobals.SuitAttributes[suit.dna.name]['attacks']
    return SuitBattleGlobals.pickSuitAttack(available, suit.getLevel())
def __calcSuitTarget(self, attackIndex):
    """Choose which toon the suit attacks; returns an index into
    battle.activeToons, or -1 when no valid target exists.

    75% of the time a suit that has been hit retaliates, weighted by how
    much damage each toon dealt it; otherwise it picks a random live toon.
    """
    attack = self.battle.suitAttacks[attackIndex]
    suitId = attack[SUIT_ID_COL]
    if suitId in self.SuitAttackers and random.randint(0, 99) < 75:
        totalDamage = 0
        for currToon in self.SuitAttackers[suitId].keys():
            totalDamage += self.SuitAttackers[suitId][currToon]
        dmgs = []
        for currToon in self.SuitAttackers[suitId].keys():
            # NOTE(review): under Python 2 this is integer division, so any toon
            # with less than all of the damage weighs in at 0 — confirm whether
            # float weighting was intended.
            dmgs.append(self.SuitAttackers[suitId][currToon] / totalDamage * 100)
        dmgIdx = SuitBattleGlobals.pickFromFreqList(dmgs)
        if dmgIdx == None:
            toonId = self.__pickRandomToon(suitId)
        else:
            # Indexing .keys() requires Python 2 (list); py3 views don't index.
            toonId = self.SuitAttackers[suitId].keys()[dmgIdx]
        if toonId == -1 or toonId not in self.battle.activeToons:
            return -1
        self.notify.debug('Suit attacking back at toon ' + str(toonId))
        return self.battle.activeToons.index(toonId)
    else:
        return self.__pickRandomToon(suitId)
def __pickRandomToon(self, suitId):
    """Return a random live toon's index in activeToons, or -1 when none remain."""
    liveToons = [idx for idx, toonId in enumerate(self.battle.activeToons)
                 if not self.__combatantDead(toonId, toon=1)]
    if not liveToons:
        self.notify.debug('No tgts avail. for suit ' + str(suitId))
        return -1
    chosen = random.choice(liveToons)
    self.notify.debug('Suit randomly attacking toon ' + str(self.battle.activeToons[chosen]))
    return chosen
def __suitAtkHit(self, attackIndex):
    """Roll accuracy for a suit attack; returns 1 on hit, 0 on miss."""
    if self.suitsAlwaysHit:
        return 1
    if self.suitsAlwaysMiss:
        return 0
    theSuit = self.battle.activeSuits[attackIndex]
    atkType = self.battle.suitAttacks[attackIndex][SUIT_ATK_COL]
    atkInfo = SuitBattleGlobals.getSuitAttack(theSuit.dna.name, theSuit.getLevel(), atkType)
    atkAcc = atkInfo['acc']
    suitAcc = SuitBattleGlobals.SuitAttributes[theSuit.dna.name]['acc'][theSuit.getLevel()]
    # Only the per-attack accuracy decides the roll; suitAcc is logged for reference.
    acc = atkAcc
    randChoice = random.randint(0, 99)
    if self.notify.getDebug():
        self.notify.debug('Suit attack rolled ' + str(randChoice) + ' to hit with an accuracy of ' + str(acc) + ' (attackAcc: ' + str(atkAcc) + ' suitAcc: ' + str(suitAcc) + ')')
    return 1 if randChoice < acc else 0
def __suitAtkAffectsGroup(self, attack):
    """True when the suit attack targets the whole toon group, not a single toon."""
    suit = self.battle.findSuit(attack[SUIT_ID_COL])
    info = SuitBattleGlobals.getSuitAttack(suit.dna.name, suit.getLevel(), attack[SUIT_ATK_COL])
    return info['group'] != SuitBattleGlobals.ATK_TGT_SINGLE
def __createSuitTargetList(self, attackIndex):
    """Return the toon ids targeted by the suit attack at *attackIndex*."""
    attack = self.battle.suitAttacks[attackIndex]
    targetList = []
    if attack[SUIT_ATK_COL] == NO_ATTACK:
        self.notify.debug('No attack, no targets')
        return targetList
    debug = self.notify.getDebug()
    if not self.__suitAtkAffectsGroup(attack):
        targetList.append(self.battle.activeToons[attack[SUIT_TGT_COL]])
        if debug:
            self.notify.debug('Suit attack is single target')
        return targetList
    if debug:
        self.notify.debug('Suit attack is group target')
    for currToon in self.battle.activeToons:
        if debug:
            self.notify.debug('Suit attack will target toon' + str(currToon))
        targetList.append(currToon)
    return targetList
def __calcSuitAtkHp(self, attackIndex):
    """Fill SUIT_HP_COL with the damage dealt to each target (0 on a miss)."""
    attack = self.battle.suitAttacks[attackIndex]
    for toonId in self.__createSuitTargetList(attackIndex):
        toon = self.battle.getToon(toonId)
        result = 0
        if toon and toon.immortalMode:
            # Immortal toons take a token 1 damage so hits still register.
            result = 1
        elif TOONS_TAKE_NO_DAMAGE:
            result = 0
        elif self.__suitAtkHit(attackIndex):
            atkType = attack[SUIT_ATK_COL]
            theSuit = self.battle.findSuit(attack[SUIT_ID_COL])
            atkInfo = SuitBattleGlobals.getSuitAttack(theSuit.dna.name, theSuit.getLevel(), atkType)
            result = atkInfo['hp']
        targetIndex = self.battle.activeToons.index(toonId)
        attack[SUIT_HP_COL][targetIndex] = result
def __getToonHp(self, toonDoId):
    """Current effective HP: live handle HP plus this round's pending adjustments."""
    handle = self.battle.getToon(toonDoId)
    if handle == None or toonDoId not in self.toonHPAdjusts:
        return 0
    return handle.hp + self.toonHPAdjusts[toonDoId]
def __getToonMaxHp(self, toonDoId):
    """Max HP for a toon, or 0 when the toon object is unavailable."""
    handle = self.battle.getToon(toonDoId)
    if handle == None:
        return 0
    return handle.maxHp
def __applySuitAttackDamages(self, attackIndex):
    """Apply a suit attack's HP column to each toon, flagging any deaths."""
    attack = self.battle.suitAttacks[attackIndex]
    if not APPLY_HEALTH_ADJUSTMENTS:
        return
    for position, toonId in enumerate(self.battle.activeToons):
        damage = attack[SUIT_HP_COL][position]
        if damage <= 0:
            continue
        if self.__getToonHp(toonId) - damage <= 0:
            if self.notify.getDebug():
                self.notify.debug('Toon %d has died, removing' % toonId)
            self.toonLeftBattle(toonId)
            attack[TOON_DIED_COL] = attack[TOON_DIED_COL] | 1 << position
        if self.notify.getDebug():
            self.notify.debug('Toon ' + str(toonId) + ' takes ' + str(damage) + ' damage')
        self.toonHPAdjusts[toonId] -= damage
        self.notify.debug('Toon ' + str(toonId) + ' now has ' + str(self.__getToonHp(toonId)) + ' health')
def __suitCanAttack(self, suitId):
    """A suit may attack unless it is dead, lured, or just used its revive."""
    blocked = (self.__combatantDead(suitId, toon=0) or
               self.__suitIsLured(suitId) or
               self.__combatantJustRevived(suitId))
    return 0 if blocked else 1
def __updateSuitAtkStat(self, toonId):
    """Count how many times each toon has been targeted by suit attacks."""
    self.suitAtkStats[toonId] = self.suitAtkStats.get(toonId, 0) + 1
def __printSuitAtkStats(self):
    """Debug-log how often each still-active toon was targeted this battle."""
    self.notify.debug('Suit Atk Stats:')
    for currTgt in self.suitAtkStats.keys():
        # Skip toons that have already left the battle.
        if currTgt not in self.battle.activeToons:
            continue
        tgtPos = self.battle.activeToons.index(currTgt)
        self.notify.debug(' toon ' + str(currTgt) + ' at position ' + str(tgtPos) + ' was attacked ' + str(self.suitAtkStats[currTgt]) + ' times')
    self.notify.debug('\n')
def __calculateSuitAttacks(self):
    """Choose and resolve every suit attack this round: pick attack type and
    target, compute damage, clear attacks with no live targets, and apply
    damage for attacks that hit."""
    for i in xrange(len(self.battle.suitAttacks)):
        if i < len(self.battle.activeSuits):
            suitId = self.battle.activeSuits[i].doId
            self.battle.suitAttacks[i][SUIT_ID_COL] = suitId
            if not self.__suitCanAttack(suitId):
                if self.notify.getDebug():
                    self.notify.debug("Suit %d can't attack" % suitId)
                continue
            # Suits still joining/pending don't attack this round.
            if self.battle.pendingSuits.count(self.battle.activeSuits[i]) > 0 or self.battle.joiningSuits.count(self.battle.activeSuits[i]) > 0:
                continue
            attack = self.battle.suitAttacks[i]
            attack[SUIT_ID_COL] = self.battle.activeSuits[i].doId
            attack[SUIT_ATK_COL] = self.__calcSuitAtkType(i)
            attack[SUIT_TGT_COL] = self.__calcSuitTarget(i)
            if attack[SUIT_TGT_COL] == -1:
                self.battle.suitAttacks[i] = getDefaultSuitAttack()
                attack = self.battle.suitAttacks[i]
                self.notify.debug('clearing suit attack, no avail targets')
            self.__calcSuitAtkHp(i)
            if attack[SUIT_ATK_COL] != NO_ATTACK:
                if self.__suitAtkAffectsGroup(attack):
                    for currTgt in self.battle.activeToons:
                        self.__updateSuitAtkStat(currTgt)
                else:
                    tgtId = self.battle.activeToons[attack[SUIT_TGT_COL]]
                    self.__updateSuitAtkStat(tgtId)
                targets = self.__createSuitTargetList(i)
                allTargetsDead = 1
                for currTgt in targets:
                    if self.__getToonHp(currTgt) > 0:
                        allTargetsDead = 0
                        break
                if allTargetsDead:
                    self.battle.suitAttacks[i] = getDefaultSuitAttack()
                    if self.notify.getDebug():
                        self.notify.debug('clearing suit attack, targets dead')
                        self.notify.debug('suit attack is now ' + repr(self.battle.suitAttacks[i]))
                        self.notify.debug('all attacks: ' + repr(self.battle.suitAttacks))
                    attack = self.battle.suitAttacks[i]
            if self.__attackHasHit(attack, suit=1):
                self.__applySuitAttackDamages(i)
            if self.notify.getDebug():
                self.notify.debug('Suit attack: ' + str(self.battle.suitAttacks[i]))
            attack[SUIT_BEFORE_TOONS_COL] = 0
def __updateLureTimeouts(self):
    """Advance every lure one round and wake suits whose lure expired or
    whose random wakeup roll succeeded."""
    if self.notify.getDebug():
        self.notify.debug('__updateLureTimeouts()')
        self.notify.debug('Lured suits: ' + str(self.currentlyLuredSuits))
    noLongerLured = []
    for currLuredSuit in self.currentlyLuredSuits.keys():
        self.__incLuredCurrRound(currLuredSuit)
        if self.__luredMaxRoundsReached(currLuredSuit) or self.__luredWakeupTime(currLuredSuit):
            noLongerLured.append(currLuredSuit)
    # Remove after iterating to avoid mutating the dict mid-loop.
    for currLuredSuit in noLongerLured:
        self.__removeLured(currLuredSuit)
    if self.notify.getDebug():
        self.notify.debug('Lured suits: ' + str(self.currentlyLuredSuits))
def __initRound(self):
    """Set up per-round state: build the toon attack order (pets, fire, then
    tracks HEAL..DROP with NPC traps after player traps), apply NPC SOS
    globals, and reset HP adjustments, bonuses, unlures, traps and lures.

    Returns (toonsHit, cogsMiss) flags so calculateRound can undo the
    class-level always-hit/always-miss overrides afterwards.
    """
    if CLEAR_SUIT_ATTACKERS:
        self.SuitAttackers = {}
    self.toonAtkOrder = []
    attacks = findToonAttack(self.battle.activeToons, self.battle.toonAttacks, PETSOS)
    for atk in attacks:
        self.toonAtkOrder.append(atk[TOON_ID_COL])
    attacks = findToonAttack(self.battle.activeToons, self.battle.toonAttacks, FIRE)
    for atk in attacks:
        self.toonAtkOrder.append(atk[TOON_ID_COL])
    for track in xrange(HEAL, DROP + 1):
        attacks = findToonAttack(self.battle.activeToons, self.battle.toonAttacks, track)
        if track == TRAP:
            # Player-placed traps execute before NPC SOS traps.
            sortedTraps = []
            for atk in attacks:
                if atk[TOON_TRACK_COL] == TRAP:
                    sortedTraps.append(atk)
            for atk in attacks:
                if atk[TOON_TRACK_COL] == NPCSOS:
                    sortedTraps.append(atk)
            attacks = sortedTraps
        for atk in attacks:
            self.toonAtkOrder.append(atk[TOON_ID_COL])
    specials = findToonAttack(self.battle.activeToons, self.battle.toonAttacks, NPCSOS)
    toonsHit = 0
    cogsMiss = 0
    for special in specials:
        npc_track = NPCToons.getNPCTrack(special[TOON_TGT_COL])
        if npc_track == NPC_TOONS_HIT:
            BattleCalculatorAI.toonsAlwaysHit = 1
            toonsHit = 1
        elif npc_track == NPC_COGS_MISS:
            BattleCalculatorAI.suitsAlwaysMiss = 1
            cogsMiss = 1
    if self.notify.getDebug():
        self.notify.debug('Toon attack order: ' + str(self.toonAtkOrder))
        self.notify.debug('Active toons: ' + str(self.battle.activeToons))
        self.notify.debug('Toon attacks: ' + str(self.battle.toonAttacks))
        self.notify.debug('Active suits: ' + str(self.battle.activeSuits))
        self.notify.debug('Suit attacks: ' + str(self.battle.suitAttacks))
    self.toonHPAdjusts = {}
    for t in self.battle.activeToons:
        self.toonHPAdjusts[t] = 0
    self.__clearBonuses()
    self.__updateActiveToons()
    self.delayedUnlures = []
    self.__initTraps()
    self.successfulLures = {}
    return (toonsHit, cogsMiss)
def calculateRound(self):
    """Run one full battle round: pad the per-target columns, initialize
    round state, resolve toon attacks, age lures, resolve suit attacks,
    and undo any NPC SOS always-hit/always-miss overrides."""
    longest = max(len(self.battle.activeToons), len(self.battle.activeSuits))
    for t in self.battle.activeToons:
        for j in xrange(longest):
            self.battle.toonAttacks[t][TOON_HP_COL].append(-1)
            self.battle.toonAttacks[t][TOON_KBBONUS_COL].append(-1)
    for i in xrange(4):
        for j in xrange(len(self.battle.activeToons)):
            self.battle.suitAttacks[i][SUIT_HP_COL].append(-1)
    toonsHit, cogsMiss = self.__initRound()
    for suit in self.battle.activeSuits:
        if suit.isGenerated():
            suit.b_setHP(suit.getHP())
    for suit in self.battle.activeSuits:
        # Bail out entirely if a stale suit object slipped into the battle.
        if not hasattr(suit, 'dna'):
            self.notify.warning('a removed suit is in this battle!')
            return None
    self.__calculateToonAttacks()
    self.__updateLureTimeouts()
    self.__calculateSuitAttacks()
    if toonsHit == 1:
        BattleCalculatorAI.toonsAlwaysHit = 0
    if cogsMiss == 1:
        BattleCalculatorAI.suitsAlwaysMiss = 0
    if self.notify.getDebug():
        self.notify.debug('Toon skills gained after this round: ' + repr(self.toonSkillPtsGained))
        self.__printSuitAtkStats()
    self.currentlyWetSuits = []
def __calculateFiredCogs():
    # NOTE(review): dead debug stub — it is missing `self` and unconditionally
    # drops into the debugger. Appears to be leftover scaffolding; confirm it
    # is never called before removing.
    import pdb
    pdb.set_trace()
def toonLeftBattle(self, toonId):
    """Purge all per-toon bookkeeping when a toon exits the battle."""
    if self.notify.getDebug():
        self.notify.debug('toonLeftBattle()' + str(toonId))
    if toonId in self.toonSkillPtsGained:
        del self.toonSkillPtsGained[toonId]
    if toonId in self.suitAtkStats:
        del self.suitAtkStats[toonId]
    if not CLEAR_SUIT_ATTACKERS:
        # Drop this toon from every suit's attacker record; remove suits
        # whose record becomes empty (copy keys first: we mutate while looping).
        emptySuitIds = []
        for suitId in list(self.SuitAttackers.keys()):
            if toonId in self.SuitAttackers[suitId]:
                del self.SuitAttackers[suitId][toonId]
                if len(self.SuitAttackers[suitId]) == 0:
                    emptySuitIds.append(suitId)
        for suitId in emptySuitIds:
            del self.SuitAttackers[suitId]
    self.__clearTrapCreator(toonId)
    self.__clearLurer(toonId)
def suitLeftBattle(self, suitId):
    """Drop all per-suit bookkeeping (lure state, attacker history, traps)."""
    if self.notify.getDebug():
        self.notify.debug('suitLeftBattle(): ' + str(suitId))
    self.__removeLured(suitId)
    self.SuitAttackers.pop(suitId, None)
    self.__removeSuitTrap(suitId)
def __updateActiveToons(self):
    """Drop bookkeeping for toons no longer in the battle: attacker records
    and trap exp credit."""
    if self.notify.getDebug():
        self.notify.debug('updateActiveToons()')
    if not CLEAR_SUIT_ATTACKERS:
        oldSuitIds = []
        # NOTE(review): deleting entries while iterating .keys() is only safe
        # under Python 2, where keys() returns a list copy.
        for s in self.SuitAttackers.keys():
            for t in self.SuitAttackers[s].keys():
                if t not in self.battle.activeToons:
                    del self.SuitAttackers[s][t]
                    if len(self.SuitAttackers[s]) == 0:
                        oldSuitIds.append(s)
        for oldSuitId in oldSuitIds:
            del self.SuitAttackers[oldSuitId]
    for trap in self.traps.keys():
        if self.traps[trap][1] not in self.battle.activeToons:
            self.notify.debug('Trap for toon ' + str(self.traps[trap][1]) + ' will no longer give exp')
            # Zero the creator id so the trap keeps working but gives no exp.
            self.traps[trap][1] = 0
def getSkillGained(self, toonId, track):
    """Delegate to BattleExperienceAI for the exp this toon earned on *track*."""
    return BattleExperienceAI.getSkillGained(self.toonSkillPtsGained, toonId, track)
def getLuredSuits(self):
    """Return the ids of every currently lured suit."""
    luredSuits = self.currentlyLuredSuits.keys()
    self.notify.debug('Lured suits reported to battle: ' + repr(luredSuits))
    return luredSuits
def __suitIsLured(self, suitId, prevRound = 0):
    """True when the suit is lured; with *prevRound*, only if lured before this round."""
    if suitId not in self.currentlyLuredSuits:
        return False
    if prevRound:
        # A currRounds of -1 marks a lure that only took effect this round.
        return self.currentlyLuredSuits[suitId][0] != -1
    return True
def __findAvailLureId(self, lurerId):
    """Return the lowest positive lure id not already used by *lurerId*."""
    luredSuits = self.currentlyLuredSuits.keys()
    lureIds = []
    for currLured in luredSuits:
        lurerInfo = self.currentlyLuredSuits[currLured][3]
        lurers = lurerInfo.keys()
        for currLurer in lurers:
            currId = lurerInfo[currLurer][1]
            if currLurer == lurerId and currId not in lureIds:
                lureIds.append(currId)
    # Walk the sorted ids and return the first gap (or the next id after them).
    lureIds.sort()
    currId = 1
    for currLureId in lureIds:
        if currLureId != currId:
            return currId
        currId += 1
    return currId
def __addLuredSuitInfo(self, suitId, currRounds, maxRounds, wakeChance, lurer, lureLvl, lureId = -1, npc = 0):
    """Register (or extend) a lure on *suitId*; returns the lure id used.

    currentlyLuredSuits[suitId] holds [roundsSoFar, maxRounds, wakeChance,
    {lurerId: [lureLvl, lureId, credit]}].  NPC lures never earn credit.
    """
    if lureId == -1:
        availLureId = self.__findAvailLureId(lurer)
    else:
        availLureId = lureId
    if npc == 1:
        credit = 0
    else:
        credit = self.itemIsCredit(LURE, lureLvl)
    if suitId in self.currentlyLuredSuits:
        lureInfo = self.currentlyLuredSuits[suitId]
        if not lurer in lureInfo[3]:
            # An additional lurer extends duration and keeps the lower wake chance.
            lureInfo[1] += maxRounds
            if wakeChance < lureInfo[2]:
                lureInfo[2] = wakeChance
            lureInfo[3][lurer] = [lureLvl, availLureId, credit]
    else:
        lurerInfo = {lurer: [lureLvl, availLureId, credit]}
        self.currentlyLuredSuits[suitId] = [currRounds,
         maxRounds,
         wakeChance,
         lurerInfo]
    self.notify.debug('__addLuredSuitInfo: currLuredSuits -> %s' % repr(self.currentlyLuredSuits))
    return availLureId
def __getLurers(self, suitId):
    """Return the toon ids that currently hold a lure on *suitId* (empty if none)."""
    if not self.__suitIsLured(suitId):
        return []
    return self.currentlyLuredSuits[suitId][3].keys()
def __getLuredExpInfo(self, suitId):
    """Return [lurerId, lureLvl, lureId, credit] for each lurer of *suitId*."""
    lurers = self.__getLurers(suitId)
    if len(lurers) == 0:
        return []
    lurerInfo = self.currentlyLuredSuits[suitId][3]
    return [[lurer,
             lurerInfo[lurer][0],
             lurerInfo[lurer][1],
             lurerInfo[lurer][2]] for lurer in lurers]
def __clearLurer(self, lurerId, lureId = -1):
    """Remove *lurerId*'s lure records (optionally only the one with *lureId*)."""
    luredSuits = self.currentlyLuredSuits.keys()
    for currLured in luredSuits:
        lurerInfo = self.currentlyLuredSuits[currLured][3]
        # NOTE(review): deleting while iterating .keys() relies on Python 2
        # returning a list copy; under py3 this raises RuntimeError.
        lurers = lurerInfo.keys()
        for currLurer in lurers:
            if currLurer == lurerId and (lureId == -1 or lureId == lurerInfo[currLurer][1]):
                del lurerInfo[currLurer]
def __setLuredMaxRounds(self, suitId, rounds):
    """Override how many rounds the lure on *suitId* lasts (no-op if not lured)."""
    if not self.__suitIsLured(suitId):
        return
    self.currentlyLuredSuits[suitId][1] = rounds
def __setLuredWakeChance(self, suitId, chance):
    """Override the per-round wakeup chance for *suitId* (no-op if not lured)."""
    if not self.__suitIsLured(suitId):
        return
    self.currentlyLuredSuits[suitId][2] = chance
def __incLuredCurrRound(self, suitId):
    """Advance the lure's elapsed-round counter by one (no-op if not lured)."""
    if not self.__suitIsLured(suitId):
        return
    self.currentlyLuredSuits[suitId][0] += 1
def __removeLured(self, suitId):
    """Forget the lure on *suitId*, if any."""
    if self.__suitIsLured(suitId):
        del self.currentlyLuredSuits[suitId]
def __luredMaxRoundsReached(self, suitId):
    """True when the suit's lure has run for its full round allotment."""
    return self.__suitIsLured(suitId) and self.currentlyLuredSuits[suitId][0] >= self.currentlyLuredSuits[suitId][1]
def __luredWakeupTime(self, suitId):
    """Random early-wakeup roll; only possible after the lure's first round."""
    return self.__suitIsLured(suitId) and self.currentlyLuredSuits[suitId][0] > 0 and random.randint(0, 99) < self.currentlyLuredSuits[suitId][2]
def itemIsCredit(self, track, level):
    """A gag earns skill credit when its level is below the battle's credit level.

    Pet tricks never earn credit.
    """
    return 0 if track == PETSOS else level < self.creditLevel
def __getActualTrack(self, toonAttack):
    """Resolve the attack's track, substituting the NPC's gag for NPCSOS attacks."""
    if toonAttack[TOON_TRACK_COL] != NPCSOS:
        return toonAttack[TOON_TRACK_COL]
    track = NPCToons.getNPCTrack(toonAttack[TOON_TGT_COL])
    if track != None:
        return track
    self.notify.warning('No NPC with id: %d' % toonAttack[TOON_TGT_COL])
    return toonAttack[TOON_TRACK_COL]
def __getActualTrackLevel(self, toonAttack):
    """Resolve (track, level), substituting the NPC's gag for NPCSOS attacks."""
    if toonAttack[TOON_TRACK_COL] != NPCSOS:
        return (toonAttack[TOON_TRACK_COL], toonAttack[TOON_LVL_COL])
    track, level, hp = NPCToons.getNPCTrackLevelHp(toonAttack[TOON_TGT_COL])
    if track != None:
        return (track, level)
    self.notify.warning('No NPC with id: %d' % toonAttack[TOON_TGT_COL])
    return (toonAttack[TOON_TRACK_COL], toonAttack[TOON_LVL_COL])
def __getActualTrackLevelHp(self, toonAttack):
    """Resolve (track, level, hp) for an attack.

    NPCSOS attacks report the summoned NPC's gag; PETSOS attacks compute
    trick healing from the pet proxy's aptitude; everything else has hp 0.
    """
    if toonAttack[TOON_TRACK_COL] == NPCSOS:
        track, level, hp = NPCToons.getNPCTrackLevelHp(toonAttack[TOON_TGT_COL])
        if track != None:
            return (track, level, hp)
        self.notify.warning('No NPC with id: %d' % toonAttack[TOON_TGT_COL])
    elif toonAttack[TOON_TRACK_COL] == PETSOS:
        # Removed an unused duplicate local (`trick`) of trickId.
        petProxyId = toonAttack[TOON_TGT_COL]
        trickId = toonAttack[TOON_LVL_COL]
        healRange = PetTricks.TrickHeals[trickId]
        hp = 0
        if petProxyId in simbase.air.doId2do:
            petProxy = simbase.air.doId2do[petProxyId]
            if trickId < len(petProxy.trickAptitudes):
                # Heal scales linearly with the pet's aptitude for this trick.
                aptitude = petProxy.trickAptitudes[trickId]
                hp = int(lerp(healRange[0], healRange[1], aptitude))
        else:
            self.notify.warning('pet proxy: %d not in doId2do!' % petProxyId)
        return (toonAttack[TOON_TRACK_COL], toonAttack[TOON_LVL_COL], hp)
    return (toonAttack[TOON_TRACK_COL], toonAttack[TOON_LVL_COL], 0)
def __calculatePetTrickSuccess(self, toonAttack):
    """Attempt a pet trick; returns (hit, accuracy) like other accuracy checks."""
    petProxyId = toonAttack[TOON_TGT_COL]
    if petProxyId not in simbase.air.doId2do:
        self.notify.warning('pet proxy %d not in doId2do!' % petProxyId)
        toonAttack[TOON_ACCBONUS_COL] = 1
        return (0, 0)
    petProxy = simbase.air.doId2do[petProxyId]
    trickId = toonAttack[TOON_LVL_COL]
    toonAttack[TOON_ACCBONUS_COL] = petProxy.attemptBattleTrick(trickId)
    # ACCBONUS_COL == 1 marks a failed trick.
    if toonAttack[TOON_ACCBONUS_COL] == 1:
        return (0, 0)
    return (1, 100)
acf0d20b12715a5d2af2850faeed0941262633b4 | 741 | py | Python | project/tests/test_string_replace.py | TobiasPrt/Smartphoniker-shop | 6b74a3cc1c81db7a56d70609dbca29ddeec3053f | [
"MIT"
] | 2 | 2020-05-11T08:46:45.000Z | 2020-05-11T09:09:57.000Z | project/tests/test_string_replace.py | TobiasPrt/Smartphoniker-shop | 6b74a3cc1c81db7a56d70609dbca29ddeec3053f | [
"MIT"
] | 4 | 2021-02-19T13:31:53.000Z | 2022-02-20T13:34:10.000Z | project/tests/test_string_replace.py | TobiasPrt/Smartphoniker-shop | 6b74a3cc1c81db7a56d70609dbca29ddeec3053f | [
"MIT"
] | 5 | 2020-04-27T16:25:39.000Z | 2020-06-07T16:03:15.000Z | from project.server.common.escape import cleanify
class TestReplacements:
    """cleanify() must transliterate German special characters to ASCII."""

    @staticmethod
    def _check(cases):
        for raw, expected in cases:
            assert cleanify(raw) == expected

    def test_ae(self):
        self._check([
            ("", ""),
            ("Äpfel", "Aepfel"),
            ("äpfel", "aepfel"),
            ("Äpfel Äpfel äpfel", "Aepfel Aepfel aepfel"),
        ])

    def test_oe(self):
        self._check([
            ("Ömel", "Oemel"),
            ("ömel", "oemel"),
            ("Ömel ömel Ömel", "Oemel oemel Oemel"),
        ])

    def test_ue(self):
        self._check([
            ("Ümel", "Uemel"),
            ("ümel", "uemel"),
            ("Ümel ümel Ümel", "Uemel uemel Uemel"),
        ])

    def test_ss(self):
        self._check([
            ("Scheiße", "Scheisse"),
        ])
| 30.875 | 71 | 0.585695 |
acf0d25a21b7723c97487f65d81696ef347b68e6 | 248 | py | Python | app_backend/models/__init__.py | zhanghe06/bearing_project | 78a20fc321f72d3ae05c7ab7e52e01d02904e3fc | [
"MIT"
] | 1 | 2020-06-21T04:08:26.000Z | 2020-06-21T04:08:26.000Z | app_backend/models/__init__.py | zhanghe06/bearing_project | 78a20fc321f72d3ae05c7ab7e52e01d02904e3fc | [
"MIT"
] | 13 | 2019-10-18T17:19:32.000Z | 2022-01-13T00:44:43.000Z | app_backend/models/__init__.py | zhanghe06/bearing_project | 78a20fc321f72d3ae05c7ab7e52e01d02904e3fc | [
"MIT"
] | 5 | 2019-02-07T03:15:16.000Z | 2021-09-04T14:06:28.000Z | #!/usr/bin/env python
# encoding: utf-8
"""
@author: zhanghe
@software: PyCharm
@file: __init__.py
@time: 2018-03-14 17:14
"""
def func():
    """Placeholder module-level function generated with the file template.

    Intentionally does nothing.
    """
    pass
class Main(object):
    """Placeholder class generated with the file template; no behavior yet."""
    def __init__(self):
        # No initialization required for the placeholder.
        pass
if __name__ == '__main__':
    # Script entry point; nothing to run in this package initializer.
    pass
| 10.782609 | 26 | 0.612903 |
acf0d269c12aacc3cb13cfb0bbfff464204c3310 | 12,425 | py | Python | src/xls_handler.py | AleX04Nov/ipt_schedule_bot | 99ee2d084372e9151a3e8882330897538df4da19 | [
"Unlicense"
] | null | null | null | src/xls_handler.py | AleX04Nov/ipt_schedule_bot | 99ee2d084372e9151a3e8882330897538df4da19 | [
"Unlicense"
] | null | null | null | src/xls_handler.py | AleX04Nov/ipt_schedule_bot | 99ee2d084372e9151a3e8882330897538df4da19 | [
"Unlicense"
] | null | null | null | import re
import xlrd
def get_key(d, value):
    """Reverse lookup: return the first key of *d* mapped to *value*, else -1."""
    return next((k for k, v in d.items() if v == value), -1)
class XlsHandler:
    """Parses a timetable workbook (via xlrd) and formats schedules.

    The parsing relies on the workbook's fixed layout: data rows start at
    row 4, each weekday occupies a 15-row band, and the two alternating
    weeks interleave every 3 rows.  NOTE(review): the constants 4/15/3
    encode that layout - confirm them against the actual spreadsheet.
    """

    def __init__(self, path, day_of_week):
        # day_of_week: mapping of day index (string) -> localized day name,
        # used when rendering; also consulted by get_week_timetable.
        self.day_of_week = day_of_week
        self.rb = None           # the open xlrd workbook
        self.sheet = None        # first worksheet of the workbook
        self.merged_dict = None  # (row, col) -> value of the merged range's anchor cell
        self.update(path)

    def update(self, path):
        """(Re)load the workbook at *path* and rebuild the merged-cell map."""
        self.rb = xlrd.open_workbook(path, formatting_info = True)
        self.sheet = self.rb.sheet_by_index(0)
        self.merge_cells()

    ######################################################
    # !!! XLS ROZKLAD PARSING BELOW !!! #
    def merge_cells(self,):
        """Map every cell of each merged range to the range's top-left value.

        xlrd reports a merged range only at its anchor cell; this map lets
        the parsers read the shared value from any covered cell.
        """
        merged = self.sheet.merged_cells
        self.merged_dict = dict()
        for i in range(len(merged)):
            a = merged[i][0]  # first row, inclusive
            b = merged[i][1]  # last row, exclusive
            for rowx in range(a, b):
                c = merged[i][2]  # first column, inclusive
                d = merged[i][3]  # last column, exclusive
                for colx in range(c, d):
                    self.merged_dict[(rowx, colx)] = self.sheet.cell_value(a, c)

    def find_timetable(self, group_name, timetable):
        """Locate *group_name*'s column and fill *timetable* with its lessons.

        Returns *timetable* extended with
        ``{group: {"week: N": {"day: N": {"N lesson: ": text}}}}``.
        The input dict is returned unchanged when the group is not found.
        """
        res = timetable
        sheet = self.sheet
        merged_dict = self.merged_dict
        for main_col in range(sheet.ncols):
            for main_row in range(sheet.nrows):
                # Normalize the candidate header cell: lowercase, strip dashes.
                name_quest = str(sheet.cell_value(main_row, main_col)).lower()
                name_quest = re.sub('[-]', '', name_quest)
                if name_quest == group_name and name_quest != '':
                    res[name_quest] = dict()
                    for i in range(2):  # the two alternating weeks
                        res[name_quest]["week: {}".format(i + 1)] = dict()
                        j = 0  # current weekday counter
                        for row in range(4, sheet.nrows):
                            if (row - 4) % 15 == 0:
                                # A new 15-row weekday band starts here.
                                j += 1
                                les = 0
                                if j == 7:
                                    break
                                res[name_quest]["week: {}".format(i + 1)]["day: {}".format(j)] = dict()
                            if row % 3 == (2 + i) % 3:
                                # Every third row belongs to week i's slot.
                                les += 1
                                value = sheet.cell_value(row, main_col)
                                if value == "":
                                    value = merged_dict.get((row, main_col), "")
                                if len(value) <= 1:
                                    # Single characters are layout noise, not lessons.
                                    value = ""
                                value = re.sub('[\n]', ' ', value)
                                res[name_quest]["week: {}".format(i + 1)]["day: {}".format(j)]["{} lesson: ".format(les)] = value
                    return res
        return res

    def print_timetable(self, group, table_dict):
        """Debug helper: dump *group*'s two-week timetable to stdout."""
        print("<======= GROUP ", group, " GROUP =======>")
        group_table = table_dict[group]
        for i in range(1, 3):
            print("Week: ", i)
            week = group_table["week: {}".format(i)]
            for j in range(1, 7):
                print("---------------------\nDay: ", j)
                day = week["day: {}".format(j)]
                for les in range(1, 6):
                    value = day["{} lesson: ".format(les)]
                    print(les, " Lesson: ", value)
            print("=====================================")

    def get_day_timetable(self, group, table_dict, day_index, week_index):
        """Format one day of *group*'s schedule as Telegram Markdown."""
        res = "*{}".format(self.day_of_week[str(day_index)]) + " {}*\n*-------------------------*\n".format(week_index)
        group_table = table_dict[group]
        week = group_table["week: {}".format(week_index)]
        day = week["day: {}".format(day_index)]
        for les in range(1, 6):
            value = day["{} lesson: ".format(les)]
            if value == "":
                value = "_----_"  # italic dash placeholder for a free slot
            res += "*{})* ".format(les) + "{}\n".format(value)
        return res

    def get_current_lesson(self, group, table_dict, day_index, week_index, less_index):
        """Return the raw text of a single lesson slot from *table_dict*."""
        value = table_dict[group]["week: {}".format(week_index)]["day: {}".format(day_index)]["{} lesson: ".format(less_index)]
        return value

    def get_day_for_week_timetable(self, group, table_dict, day_index, week_index):
        """Like get_day_timetable, but headed for embedding in a week view."""
        res = "*-------------------------*\n*{}:*\n".format(self.day_of_week[str(day_index)])
        group_table = table_dict[group]
        week = group_table["week: {}".format(week_index)]
        day = week["day: {}".format(day_index)]
        for les in range(1, 6):
            value = day["{} lesson: ".format(les)]
            if value == "":
                value = "_----_"
            res += "*{})* ".format(les) + "{}\n".format(value)
        return res

    def find_info(self, request):
        """Full-text search: find *request* anywhere in the sheet and render
        a two-week Markdown schedule of the matching lessons with the group
        names attached.  Returns a Ukrainian "not found" message when the
        text does not occur in the sheet.
        """
        sheet = self.sheet
        merged_dict = self.merged_dict
        # Pre-build an empty 2-week x 6-day x 5-lesson structure.
        res = dict()
        res[request] = dict()
        for i in range(2):
            res[request]["week: {}".format(i + 1)] = dict()
            for j in range(1, 7):
                res[request]["week: {}".format(i + 1)]["day: {}".format(j)] = dict()
                for les in range(1, 6):
                    res[request]["week: {}".format(i + 1)]["day: {}".format(j)]["{} lesson: ".format(les)] = ""
        bool_found = False
        for main_col in range(sheet.ncols):
            for main_row in range(sheet.nrows):
                if (str(sheet.cell_value(main_row, main_col)).lower()).find(request.lower()) != -1:
                    bool_found = True
                    # Derive week/day/lesson from the row position
                    # (layout constants - see class docstring).
                    week = 1 if main_row % 3 == 2 else 2
                    day = ((main_row - ((main_row - 4) % 15) + 1 - 6) // 15) + 2
                    # Lesson number lives in column 1, possibly on the row above.
                    lesson = int(sheet.cell_value(main_row, 1)) if str(sheet.cell_value(main_row, 1)) != "" else int(sheet.cell_value(main_row - 1, 1))
                    groups = str(sheet.cell_value(3, main_col))
                    # Walk right through the merged range to collect all group
                    # names sharing this lesson.  NOTE(review): `iter` shadows
                    # the builtin; loop assumes a non-empty cell terminates it.
                    iter = 1
                    while True:
                        if str(sheet.cell_value(main_row, main_col + iter)) != "":
                            break
                        if str(merged_dict.get((main_row, main_col + iter), "")) != "" and str(merged_dict.get((main_row, main_col + iter), "")) == sheet.cell_value(main_row, main_col):
                            groups += ", "
                            groups += str(sheet.cell_value(3, main_col + iter)) if str(sheet.cell_value(2, main_col + iter)) == "" else str(sheet.cell_value(2, main_col + iter)) + ", " + str(sheet.cell_value(3, main_col + iter))
                        iter += 1
                    # Store the hit; multiple hits in one slot are joined with "||".
                    if res[request]["week: {}".format(week)]["day: {}".format(day)]["{} lesson: ".format(lesson)] == "":
                        res[request]["week: {}".format(week)]["day: {}".format(day)]["{} lesson: ".format(lesson)] = str(sheet.cell_value(main_row, main_col)) + " *" + groups + "*"
                    else:
                        res[request]["week: {}".format(week)]["day: {}".format(day)]["{} lesson: ".format(lesson)] += "\n||\n" + str(sheet.cell_value(main_row, main_col)) + " *" + str(sheet.cell_value(3, main_col)) + "*"
                    # A week-1 cell whose value repeats on the next row also
                    # applies to week 2 (a merged both-weeks slot).
                    if week == 1:
                        value = sheet.cell_value(main_row + 1, main_col)
                        if str(value) == "":
                            value = merged_dict.get((main_row + 1, main_col), "")
                    # Safe: `value` is only read when week == 1 (short-circuit).
                    if week == 1 and str(value) == str(sheet.cell_value(main_row, main_col)):
                        if res[request]["week: 2"]["day: {}".format(day)]["{} lesson: ".format(lesson)] == "":
                            res[request]["week: 2"]["day: {}".format(day)]["{} lesson: ".format(lesson)] = str(value) + " *" + groups + "*"
                        else:
                            res[request]["week: 2"]["day: {}".format(day)]["{} lesson: ".format(lesson)] += "\n||\n" + str(value) + " *" + groups + "*"
        if bool_found is False:
            return "На жаль дана інформація не була знайдена у таблиці. :с"
        week_res = self.get_week_timetable(request, res, 1)
        week_res += "\n" * 2 + self.get_week_timetable(request, res, 2)
        return week_res

    def get_week_timetable(self, group, table_dict, week_index):
        """Render a full week as Markdown, pruning empty lessons and days."""
        res = "*Тиждень: {}*\n".format(week_index)
        for day_index in range(1, 7):
            day_temp = self.get_day_for_week_timetable(group, table_dict, day_index, week_index)
            day_temp = day_temp[:-1]  # drop the trailing newline
            ### DELETING EMPTY LESSONS ###
            # Trim trailing "_----_" placeholders from the end of the day.
            for del_str in range(5, 0, -1):
                if day_temp.rfind("_----_") == len(day_temp) - 6:
                    day_temp = day_temp[:(day_temp.rfind("*{})*".format(del_str))) - 1]
            ### DELETING EMPTY DAYS ###
            # If only the day name remains after stripping markup, skip the day.
            if get_key(self.day_of_week, re.sub(r'[*-: \n]', "", day_temp)) == -1:
                res += day_temp + '\n'
            else:
                res += ''
        res += "*-------------------------*"
        return res
| 36.330409 | 78 | 0.327565 |
acf0d3281cd4d3fdf6f5b47e43b780497793f58d | 2,052 | py | Python | Calibration.py | rahuljain1310/Augmented-Reality-Application | 6a464151fc08af45197b35a68734bc613ed2a7db | [
"MIT"
] | null | null | null | Calibration.py | rahuljain1310/Augmented-Reality-Application | 6a464151fc08af45197b35a68734bc613ed2a7db | [
"MIT"
] | null | null | null | Calibration.py | rahuljain1310/Augmented-Reality-Application | 6a464151fc08af45197b35a68734bc613ed2a7db | [
"MIT"
] | null | null | null | import numpy as np
import cv2
import glob
# from objloader import *
import os
# Resolve asset directories relative to the current working directory.
dir_name = os.getcwd()
dir_models = os.path.join(dir_name, 'reference')
dir_objs = os.path.join(dir_name, 'models')
# Termination criteria for sub-pixel corner refinement: 30 iterations or eps 0.001.
criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.001)
# 2-D grid of object points for a 7x6 chessboard.  NOTE(review): only two
# coordinates per point here; calibration APIs usually expect 3-D object
# points - confirm intended use.
objp = np.mgrid[0:7, 0:6].T.reshape(-1, 2)
print(objp)
objpoints = []  # 3d point in real world space
imgpoints = []  # 2d points in image plane.
images = glob.glob('*.jpg')
for fname in images:
    img = cv2.imread(fname)
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    # Detect the inner corners of the 7x6 chessboard pattern.
    ret, corners = cv2.findChessboardCorners(gray, (7, 6), None)
    if ret:
        objpoints.append(objp)
        # Refine detected corners to sub-pixel accuracy.
        corners2 = cv2.cornerSubPix(gray, corners, (11, 11), (-1, -1), criteria)
        print(corners2)
        print(corners2.shape, objp.shape)
        imgpoints.append(corners2)
        # Draw and display the corners
        img = cv2.line(img, (0, 0), (10, 10), (0, 0, 255))
        img = cv2.line(img, (100, 30), (10, 10), (0, 255, 255))
        img = cv2.drawChessboardCorners(img, (7, 6), corners2, ret)
        cv2.imshow('img', img)
        cv2.waitKey(20000)  # hold each preview window for up to 20 seconds
def render(img, obj, projection, model, color=False):
    """
    Render a loaded obj model into the current video frame

    img: destination BGR frame; modified in place and returned.
    obj: parsed OBJ model exposing ``.vertices`` and ``.faces``.
    projection: transform passed to cv2.perspectiveTransform.
    model: reference image; its shape is used to center the model.
    color: when True, fill each face with the OBJ face color; otherwise a
        fixed purple.  NOTE(review): ``hex_to_rgb`` is not defined in this
        module's visible scope - confirm it is provided elsewhere.
    """
    vertices = obj.vertices
    scale_matrix = np.eye(3) * 3  # uniform scale applied to all model points
    h, w = model.shape
    for face in obj.faces:
        face_vertices = face[0]
        # OBJ vertex indices are 1-based, hence the -1.
        points = np.array([vertices[vertex - 1] for vertex in face_vertices])
        points = np.dot(points, scale_matrix)
        # render model in the middle of the reference surface. To do so,
        # model points must be displaced
        points = np.array([[p[0] + w / 2, p[1] + h / 2, p[2]] for p in points])
        dst = cv2.perspectiveTransform(points.reshape(-1, 1, 3), projection)
        imgpts = np.int32(dst)
        if color is False:
            cv2.fillConvexPoly(img, imgpts, (137, 27, 211))
        else:
            color = hex_to_rgb(face[-1])
            color = color[::-1]  # reverse: OBJ color is RGB, OpenCV wants BGR
            cv2.fillConvexPoly(img, imgpts, color)
    return img
# def main():
# model_h = | 32.571429 | 79 | 0.627193 |
acf0d336bf6c1e87040138da7e7a06243aa31d8b | 521 | py | Python | env/lib/python3.8/site-packages/plotly/validators/densitymapbox/colorbar/_x.py | acrucetta/Chicago_COVI_WebApp | a37c9f492a20dcd625f8647067394617988de913 | [
"MIT",
"Unlicense"
] | 76 | 2020-07-06T14:44:05.000Z | 2022-02-14T15:30:21.000Z | env/lib/python3.8/site-packages/plotly/validators/densitymapbox/colorbar/_x.py | acrucetta/Chicago_COVI_WebApp | a37c9f492a20dcd625f8647067394617988de913 | [
"MIT",
"Unlicense"
] | 11 | 2020-08-09T02:30:14.000Z | 2022-03-12T00:50:14.000Z | env/lib/python3.8/site-packages/plotly/validators/densitymapbox/colorbar/_x.py | acrucetta/Chicago_COVI_WebApp | a37c9f492a20dcd625f8647067394617988de913 | [
"MIT",
"Unlicense"
] | 11 | 2020-07-12T16:18:07.000Z | 2022-02-05T16:48:35.000Z | import _plotly_utils.basevalidators
class XValidator(_plotly_utils.basevalidators.NumberValidator):
def __init__(self, plotly_name="x", parent_name="densitymapbox.colorbar", **kwargs):
super(XValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "colorbars"),
max=kwargs.pop("max", 3),
min=kwargs.pop("min", -2),
role=kwargs.pop("role", "style"),
**kwargs
)
| 34.733333 | 88 | 0.618042 |
acf0d358de2575bf329130b857fefdedecd277b4 | 1,995 | py | Python | alipay/aop/api/domain/AntMerchantExpandWarehouseOrderSyncModel.py | antopen/alipay-sdk-python-all | 8e51c54409b9452f8d46c7bb10eea7c8f7e8d30c | [
"Apache-2.0"
] | 213 | 2018-08-27T16:49:32.000Z | 2021-12-29T04:34:12.000Z | alipay/aop/api/domain/AntMerchantExpandWarehouseOrderSyncModel.py | antopen/alipay-sdk-python-all | 8e51c54409b9452f8d46c7bb10eea7c8f7e8d30c | [
"Apache-2.0"
] | 29 | 2018-09-29T06:43:00.000Z | 2021-09-02T03:27:32.000Z | alipay/aop/api/domain/AntMerchantExpandWarehouseOrderSyncModel.py | antopen/alipay-sdk-python-all | 8e51c54409b9452f8d46c7bb10eea7c8f7e8d30c | [
"Apache-2.0"
] | 59 | 2018-08-27T16:59:26.000Z | 2022-03-25T10:08:15.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class AntMerchantExpandWarehouseOrderSyncModel(object):
    """Alipay request model for syncing a warehouse order change.

    All three fields are optional.  ``to_alipay_dict`` and
    ``from_alipay_dict`` convert to and from the SDK's wire-format dict.
    """

    # Field names handled uniformly by the dict converters below.
    _FIELD_NAMES = ('assign_item_id', 'new_warehouse_id', 'type')

    def __init__(self):
        self._assign_item_id = None
        self._new_warehouse_id = None
        self._type = None

    @property
    def assign_item_id(self):
        return self._assign_item_id

    @assign_item_id.setter
    def assign_item_id(self, value):
        self._assign_item_id = value

    @property
    def new_warehouse_id(self):
        return self._new_warehouse_id

    @new_warehouse_id.setter
    def new_warehouse_id(self, value):
        self._new_warehouse_id = value

    @property
    def type(self):
        return self._type

    @type.setter
    def type(self, value):
        self._type = value

    def to_alipay_dict(self):
        """Serialize the set (truthy) fields into a plain dict."""
        params = dict()
        for field in self._FIELD_NAMES:
            value = getattr(self, field)
            if not value:
                continue  # unset/falsy fields are omitted from the payload
            if hasattr(value, 'to_alipay_dict'):
                params[field] = value.to_alipay_dict()
            else:
                params[field] = value
        return params

    @staticmethod
    def from_alipay_dict(d):
        """Build a model from a response dict; returns None for empty input."""
        if not d:
            return None
        o = AntMerchantExpandWarehouseOrderSyncModel()
        for field in AntMerchantExpandWarehouseOrderSyncModel._FIELD_NAMES:
            if field in d:
                # setattr routes through the property setters above.
                setattr(o, field, d[field])
        return o
| 28.098592 | 83 | 0.610025 |
acf0d359d528d932912f05b84167ca4fdf36c866 | 1,940 | py | Python | Anagram.py | sidhu177/Anagramic | 2931383983d932b9fd83d3b4dd99ce55d1f6d396 | [
"MIT"
] | null | null | null | Anagram.py | sidhu177/Anagramic | 2931383983d932b9fd83d3b4dd99ce55d1f6d396 | [
"MIT"
] | 9 | 2019-02-18T13:37:32.000Z | 2019-02-20T20:27:11.000Z | Anagram.py | sidhu177/Anagramic | 2931383983d932b9fd83d3b4dd99ce55d1f6d396 | [
"MIT"
] | null | null | null | ## Importing the library files
from flask import Flask, render_template, flash, request
from wtforms import Form,TextField, TextAreaField, StringField, SubmitField
from flask_bootstrap import Bootstrap
import sys
## Setting Flask Config
DEBUG = False
app = Flask(__name__)
Bootstrap(app)  # attach Flask-Bootstrap assets to the app
app.config.from_object(__name__)  # picks up DEBUG from this module
# NOTE(review): hard-coded secret key in source; move to an environment variable.
app.config['SECRET_KEY'] = 'SuperSecretKey'
strnum = 0  # module-level default; the view below shadows it with a local
## Loading the dictionary file onto memory
# Absolute path to the word list; `file` is a poor name but harmless in py3.
file = '/opt/Anagramic/WordList.txt'
def load(file):
    """Read the word list at *file* and return one lowercased entry per line.

    Script-style error handling: any I/O failure is reported to stderr and
    the process exits with status 1.
    """
    try:
        with open(file) as in_file:
            raw = in_file.read()
    except IOError as e:
        print("{}\n Error opening {}. Terminating Program.".format(e, file), file=sys.stderr)
        sys.exit(1)
    return [entry.lower() for entry in raw.strip().split('\n')]
# Load the dictionary once at import time; exits the process on failure.
word_list = load(file)
# Shared mutable state rendered by the index view; mutated per request.
anagram_list = []
## Core Flask Application
class NameForm(Form):
    """Single-field WTForms form holding the word to find anagrams for."""
    name = TextField('Word: ')
@app.route("/", methods=['GET','POST'])
def Anagram():
global anagram_list
form = NameForm(request.form)
print(form.errors)
if request.method == 'POST':
name = request.form['name']
strnum = len(name)
if strnum==0:
anagram_list='O'
flash('No Word Entered')
else:
name_sorted = sorted(name)
anagram_list = []
for word in word_list:
word = word.lower()
if word!=name:
if sorted(word)==name_sorted:
anagram_list.append(word)
str1 = ''.join(anagram_list)
strnum = len(anagram_list)
str1 = str(strnum)
flash('Total Anagrams found = ' +str1)
return render_template('index.html', form=form, data=anagram_list)
@app.errorhandler(404)
def page_not_found(e):
    """404 handler: return a plain-text body echoing the requested path."""
    missing_path = request.path
    return "Not Found: " + missing_path
if __name__=="__main__":
app.run(host='0.0.0.0')
| 28.955224 | 91 | 0.614948 |
acf0d39b38ece9d0ce8924f27ba9e9add1ef2d7a | 2,079 | py | Python | FastAudioVisual/Input/NameRegular.py | liupeng678/FastAudioVisual | 8ea5be29523fe76d302f66123dbd8adf26aef854 | [
"MIT"
] | 44 | 2020-12-29T16:01:21.000Z | 2021-07-23T02:53:40.000Z | FastAudioVisual/Input/NameRegular.py | liupeng678/FastAudioVisual | 8ea5be29523fe76d302f66123dbd8adf26aef854 | [
"MIT"
] | null | null | null | FastAudioVisual/Input/NameRegular.py | liupeng678/FastAudioVisual | 8ea5be29523fe76d302f66123dbd8adf26aef854 | [
"MIT"
] | 8 | 2020-12-29T22:29:25.000Z | 2021-01-12T18:21:11.000Z | import os
import re
def ReplaceFileName(picPath, fileSuffix=".jpg", regularName="One-", startNumber=0):
    """Rename every *fileSuffix* file under *picPath* to a numbered sequence.

    Each matching file becomes ``<regularName><1000 + k><fileSuffix>`` where
    ``k`` counts up from ``startNumber + 1`` in ``os.listdir`` order.  Only
    the base name changes; files with other suffixes are left untouched.

    # Arguments:
        picPath : directory containing the files to rename.
        fileSuffix: extension filter to match. If none, default = .jpg
        regularName : common prefix given to every renamed file.
        startNumber: numbering offset. If none, default = 0 (first file -> 1001).

    # Returns
        Not, but printing change detail.
    """
    abs_dir = os.path.abspath(picPath)
    # Bug fix: startNumber was documented but previously ignored
    # (the counter always started at 1). Default behavior is unchanged.
    i = startNumber + 1
    for pic in os.listdir(picPath):
        # Only files matching the requested suffix are renamed.
        if not pic.endswith(fileSuffix):
            continue
        oldPath = os.path.join(abs_dir, pic)
        # Rename into the "<prefix><1000+N><suffix>" scheme.
        newPath = os.path.join(abs_dir, regularName + str(1000 + i) + fileSuffix)
        os.renames(oldPath, newPath)
        print("Change original File:" + oldPath + u" to:" + newPath)
        i += 1
def ReplaceDirName(rootDir, startNumber=0):
    """Rename every entry directly under *rootDir* to a zero-padded number.

    Entries are renamed to "001", "002", ... in ``os.listdir`` order,
    continuing from ``startNumber + 1``.

    # Arguments:
        rootDir : directory whose children are renamed.
        startNumber: numbering offset. If none, default = 0 (first entry -> "001").

    # Returns
        Not, but printing change detail.
    """
    num = startNumber
    # Loop variable renamed from `dir`, which shadowed the builtin.
    for entry in os.listdir(rootDir):
        print('Old name is:' + entry)  # echo the old name
        num += 1
        # Zero-pad to three digits so the new names sort lexicographically.
        new_name = "%03d" % num
        oldName = os.path.join(rootDir, entry)
        newName = os.path.join(rootDir, new_name)
        os.rename(oldName, newName)
def MoveFileFromSCV(FileSVC):
    """Stub: move files listed in a CSV manifest. Not yet implemented."""
    pass
if __name__ == '__main__':
    # Demo run: renumber ./data's subfolders, then its .jpg files.
    rootDir = './data'
    ReplaceDirName(rootDir)
    ReplaceFileName(rootDir)
| 31.029851 | 119 | 0.641655 |
acf0d58bf07567f736cccb027fc0cf8ad8add873 | 4,183 | py | Python | tests/test_interface_init.py | theislab/AutoGeneS | 22bde0d5eba013e90edb85341e0bd9c28b82e7fd | [
"MIT"
] | 46 | 2020-02-25T14:09:21.000Z | 2022-01-20T16:42:40.000Z | tests/test_interface_init.py | theislab/AutoGeneS | 22bde0d5eba013e90edb85341e0bd9c28b82e7fd | [
"MIT"
] | 16 | 2020-03-18T15:08:42.000Z | 2022-01-29T20:00:10.000Z | tests/test_interface_init.py | theislab/AutoGeneS | 22bde0d5eba013e90edb85341e0bd9c28b82e7fd | [
"MIT"
] | 6 | 2020-02-13T14:23:46.000Z | 2021-12-28T16:50:50.000Z | import pytest
import sys
sys.path.insert(0,"..")
import autogenes as ag
import numpy as np
import pandas as pd
import anndata
def test_compute_means():
    """__compute_means collapses obs rows sharing a 'ct' label into mean rows."""
    ann = anndata.AnnData(np.reshape(np.arange(70), (7, 10)))
    ann.obs['ct'] = ['H1', 'H2', 'H1', 'G', 'H1', 'H2', 'H2']
    result = ag.main._Interface__compute_means(ann, 'ct')
    # Three distinct labels -> three mean rows; the 10 genes are unchanged.
    assert result.X.shape == (3, 10)
    # H1 rows are 0, 2, 4 -> first-gene values 0, 20, 40 average to 20.
    assert result.var_vector('H1')[0] == 20
def test_interface_init():
    """End-to-end checks of ag.init for ndarray, DataFrame and AnnData inputs,
    including celltype_key / genes_key / use_highly_variable options."""
    with pytest.raises(Exception):
        ag.init(None)

    # numpy arrays
    data = np.identity(3)
    ag.init(data)
    assert ag.main.data is data
    assert ag.main.main.data is data
    assert sum(ag.main.pre_selection) == 3

    # More rows than columns is rejected for plain arrays.
    with pytest.raises(Exception):
        ag.init(np.zeros((5, 2)))

    # DataFrame
    cols = ["gene_1", "gene_2", "gene_3"]
    df = pd.DataFrame(data, columns=cols)
    ag.init(df)
    assert np.all(ag.main.data == df.values)
    assert np.all(ag.main.main.data == df.values)
    assert np.all(ag.main.data_genes == df.columns.values)
    assert sum(ag.main.pre_selection) == 3

    # Check reset
    ag.init(data)
    assert ag.main.data_genes is None

    # AnnData
    test_data = np.zeros((7, 5))
    test_data[[0, 2, 4], :] = 1
    test_data[[1, 5, 6], :] = -3
    test_data[3, :] = 4
    adata = anndata.AnnData(test_data)
    genes = [f"gene_{i}" for i in range(5)]
    adata.var_names = genes
    adata.var["highly_variable"] = np.array([True, True, False, True, False])
    adata.var["selection"] = np.array([False, False, True, True, True])
    adata.obs["col"] = np.full((7,), True)

    # No celltype column
    with pytest.raises(ValueError):
        ag.init(adata)

    adata.obs["celltype"] = ['H1', 'H2', 'H1', 'G', 'H1', 'H2', 'H2']
    adata.obs["only_h1"] = ['H1', 'H1', 'H1', 'H1', 'H1', 'H1', 'H1']
    adata.obs["celltype2"] = ['H1', 'H2', 'H1', 'H1', 'H1', 'H2', 'H2']

    # Simple
    ag.init(adata)
    # Means per celltype, sorted alphabetically: G=4, H1=1, H2=-3.
    test_data_mean = np.repeat(np.array([4, 1, -3]).reshape(3, 1), 5, axis=1)
    assert np.array_equal(ag.main.data_genes, genes)
    assert sum(ag.main.pre_selection) == 5
    assert np.array_equal(ag.main._adata.var_names, genes)
    assert np.array_equal(ag.main._adata.obs_names, ['G', 'H1', 'H2'])
    assert np.array_equal(ag.main._adata.X, test_data_mean)
    assert np.array_equal(ag.main.data, test_data_mean)

    # celltype_key
    with pytest.raises(ValueError):
        # A column with a single celltype is rejected.
        ag.init(adata, celltype_key="only_h1")
    ag.init(adata, celltype_key="celltype2")
    # H1 now covers rows 0,2,3,4 -> mean (1+1+4+1)/4 = 1.75.
    assert np.array_equal(ag.main._adata.X, np.repeat(np.array([1.75, -3]).reshape(2, 1), 5, axis=1))

    # genes_key
    ag.init(adata, genes_key="selection")
    assert np.array_equal(ag.main._adata.X, test_data_mean)
    assert np.array_equal(ag.main.data, test_data_mean[:, [2, 3, 4]])
    assert np.all(ag.main.pre_selection == adata.var["selection"])
    # Not the selected genes, but ALL original genes!
    assert np.all(ag.main.data_genes == adata.var_names.values)

    # use_highly_variable
    ag.init(adata, use_highly_variable=True)
    assert np.array_equal(ag.main._adata.X, test_data_mean)
    assert np.array_equal(ag.main.data, test_data_mean[:, [0, 1, 3]])
    assert np.all(ag.main.pre_selection == adata.var["highly_variable"])
    assert np.all(ag.main.data_genes == adata.var_names.values)

    # celltype_key (2)
    test_data = np.reshape(np.arange(70), (7, 10))
    ann = anndata.AnnData(test_data)
    ann.obs["ct"] = ['H1', 'H2', 'H1', 'G', 'H1', 'H2', 'H2']
    ag.init(ann, celltype_key="ct")
    assert np.all(ag.main._adata.shape == (3, 10))
    # H1 rows 0,2,4 average to 20..29; G is row 3 -> 30..39.
    assert np.all(ag.main._adata.var_vector('H1') == [20 + i for i in range(10)])
    assert np.all(ag.main._adata.var_vector('G') == [30 + i for i in range(10)])
    assert np.all(ag.main.data_genes == ann.var_names.values)
    assert sum(ag.main.pre_selection) == 10

    # celltype_key + genes_key
    sel = np.array([True, True, True, False, False, True, False, True, True, True])
    ann.var['selection'] = sel
    ag.init(ann, celltype_key="ct", genes_key="selection")
    # NOT (3,7)! The genes are not applied, but stored in pre_selection
    assert ag.main._adata.X.shape == (3, 10)
    assert ag.main.data.shape == (3, 7)
    sel_ids, = np.where(sel)
    assert np.array_equal(ag.main.data[1], [20 + i for i in sel_ids])
    assert np.array_equal(ag.main.data[0], [30 + i for i in sel_ids])
    assert np.array_equal(ag.main.data_genes, ann.var_names)
    assert sum(ag.main.pre_selection) == 7
| 32.426357 | 97 | 0.680373 |
acf0d6ea1d23ab2cf63abd4584f0762b83c1a7ae | 587 | py | Python | module_generator/WallGen.py | fengjixuchui/HiddenWall | 720b3e58c813b60ac5b3f0ae24484bf8f3881cd1 | [
"BSD-3-Clause"
] | 335 | 2019-04-23T02:31:38.000Z | 2022-03-30T07:52:28.000Z | module_generator/WallGen.py | fengjixuchui/HiddenWall | 720b3e58c813b60ac5b3f0ae24484bf8f3881cd1 | [
"BSD-3-Clause"
] | 2 | 2019-04-21T19:37:23.000Z | 2019-05-12T22:50:47.000Z | module_generator/WallGen.py | fengjixuchui/HiddenWall | 720b3e58c813b60ac5b3f0ae24484bf8f3881cd1 | [
"BSD-3-Clause"
] | 60 | 2019-05-06T22:36:35.000Z | 2022-03-17T03:18:59.000Z | #!/usr/bin/python3
# WallGen v0.2'/'
# You need the yaml / pyyaml modules installed...
from util import parser
template_filename=""
rules_filename=""
# Get argvs of user's input
template_filename,rules_filename = parser.arguments()
# load rules of firewall at directory rules
try:
rules_wall=parser.Get_config(rules_filename)
except Exception as e:
print(" log error in config parser rules: "+str(e))
exit(0)
# Load templates and generate
try:
parser.start_generator(template_filename, rules_wall)
except Exception as e:
print(" log error in rule generator: "+str(e))
exit(0)
| 23.48 | 57 | 0.730835 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.