code
stringlengths 1
25.8M
| language
stringclasses 18
values | source
stringclasses 4
values | repo
stringclasses 78
values | path
stringlengths 0
268
|
|---|---|---|---|---|
# -*- coding: utf-8 -*-
"""QGIS Unit tests for the postgres raster provider.
Note: to prepare the DB, you need to run the sql files specified in
tests/testdata/provider/testdata_pg.sh
Read tests/README.md about writing/launching tests with PostgreSQL.
Run with ctest -V -R PyQgsPostgresProvider
.. note:: This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
"""
from builtins import next
__author__ = 'Alessandro Pasotti'
__date__ = '2019-12-20'
__copyright__ = 'Copyright 2019, The QGIS Project'
import qgis # NOQA
import os
import time
from qgis.core import (
QgsSettings,
QgsReadWriteContext,
QgsRectangle,
QgsCoordinateReferenceSystem,
QgsProject,
QgsRasterLayer,
QgsPointXY,
QgsRaster,
QgsProviderRegistry,
QgsRasterBandStats,
QgsDataSourceUri,
)
from qgis.testing import start_app, unittest
from utilities import unitTestDataPath, compareWkt
# Spin up a single QGIS application instance for the whole test module.
QGISAPP = start_app()
# Root directory of the shared QGIS unit-test data.
TEST_DATA_DIR = unitTestDataPath()
class TestPyQgsPostgresRasterProvider(unittest.TestCase):
    @classmethod
    def _load_test_table(cls, schemaname, tablename, basename=None):
        """Load a raster fixture table into the test database if missing.

        Executes tests/testdata/provider/postgresraster/<basename>.sql
        (defaults to <tablename>.sql) and asserts the table now exists.
        """
        postgres_conn = cls.dbconn + " sslmode=disable "
        md = QgsProviderRegistry.instance().providerMetadata('postgres')
        conn = md.createConnection(postgres_conn, {})
        if basename is None:
            basename = tablename
        # Only run the fixture SQL once per database: skip if already loaded.
        if tablename not in [n.tableName() for n in conn.tables(schemaname)]:
            with open(os.path.join(TEST_DATA_DIR, 'provider', 'postgresraster', basename + '.sql'), 'r') as f:
                sql = f.read()
            conn.executeSql(sql)
            assert (tablename in [n.tableName() for n in conn.tables(
                schemaname)]), tablename + ' not found!'
    @classmethod
    def setUpClass(cls):
        """Run before all tests"""
        # Default connection uses the 'qgis_test' service; CI can override it.
        cls.dbconn = 'service=qgis_test'
        if 'QGIS_PGTEST_DB' in os.environ:
            cls.dbconn = os.environ['QGIS_PGTEST_DB']
        # Load every raster fixture exercised by the tests below.
        cls._load_test_table('public', 'raster_tiled_3035')
        cls._load_test_table('public', 'raster_3035_no_constraints')
        cls._load_test_table('public', 'raster_3035_tiled_no_overviews')
        cls._load_test_table('public', 'raster_3035_tiled_no_pk')
        cls._load_test_table('public', 'raster_3035_tiled_composite_pk')
        cls._load_test_table('public', 'raster_3035_untiled_multiple_rows')
        cls._load_test_table('idro', 'cosmo_i5_snow', 'bug_34823_pg_raster')
        cls._load_test_table(
            'public', 'int16_regression_36689', 'bug_36689_pg_raster')
        cls._load_test_table('public', 'bug_37968_dem_linear_cdn_extract')
        cls._load_test_table('public', 'bug_39017_untiled_no_metadata')
        # Fix timing issues in backend
        # time.sleep(1)
        # Create test layer
        cls.rl = QgsRasterLayer(
            cls.dbconn + ' sslmode=disable key=\'rid\' srid=3035 table="public"."raster_tiled_3035" sql=', 'test',
            'postgresraster')
        assert cls.rl.isValid()
        # Shared provider handle used by most tests.
        cls.source = cls.rl.dataProvider()
def gdal_block_compare(self, rlayer, band, extent, width, height, value):
"""Compare a block result with GDAL raster"""
uri = rlayer.uri()
gdal_uri = "PG: dbname={dbname} mode=2 host={host} port={port} table={table} schema={schema} sslmode=disable".format(
**{
'dbname': uri.database(),
'host': uri.host(),
'port': uri.port(),
'table': uri.table(),
'schema': uri.schema()
})
gdal_rl = QgsRasterLayer(gdal_uri, "rl", "gdal")
self.assertTrue(gdal_rl.isValid())
self.assertEqual(value, gdal_rl.dataProvider().block(
band, self.rl.extent(), 6, 5).data().toHex())
    @classmethod
    def tearDownClass(cls):
        """Run after all tests"""
        # Intentionally empty: fixture tables are left in place so later
        # runs can reuse them (see _load_test_table's existence check).
def testExtent(self):
extent = self.rl.extent()
self.assertEqual(extent, QgsRectangle(
4080050, 2430625, 4080200, 2430750))
def testSize(self):
self.assertEqual(self.source.xSize(), 6)
self.assertEqual(self.source.ySize(), 5)
def testCrs(self):
self.assertEqual(self.source.crs().authid(), 'EPSG:3035')
def testGetData(self):
identify = self.source.identify(QgsPointXY(
4080137.9, 2430687.9), QgsRaster.IdentifyFormatValue)
expected = 192.51044
self.assertAlmostEqual(identify.results()[1], expected, 4)
    def testBlockTiled(self):
        """Read the full-extent 6x5 block and compare with a known hex dump
        of the raw block bytes (QByteArray.toHex()).
        """
        expected = b'6a610843880b0e431cc2194306342543b7633c43861858436e0a1143bbad194359612743a12b334317be4343dece59432b621b43f0e42843132b3843ac824043e6cf48436e465a435c4d2d430fa63d43f87a4843b5494a4349454e4374f35b43906e41433ab54c43b056504358575243b1ec574322615f43'
        block = self.source.block(1, self.rl.extent(), 6, 5)
        actual = block.data().toHex()
        # Compare lengths first so a size mismatch fails with a clear message.
        self.assertEqual(len(actual), len(expected))
        self.assertEqual(actual, expected)
def testNoConstraintRaster(self):
"""Read unconstrained raster layer"""
rl = QgsRasterLayer(
self.dbconn + ' sslmode=disable key=\'pk\' srid=3035 table="public"."raster_3035_no_constraints" sql=',
'test', 'postgresraster')
self.assertTrue(rl.isValid())
def testPkGuessing(self):
"""Read raster layer with no pkey in uri"""
rl = QgsRasterLayer(self.dbconn + ' sslmode=disable srid=3035 table="public"."raster_tiled_3035" sql=', 'test',
'postgresraster')
self.assertTrue(rl.isValid())
    def testWhereCondition(self):
        """Read raster layer with where condition"""
        # Unfiltered layer over the whole table.
        # NOTE(review): there is no space before 'sql=' in the concatenation;
        # it appears to be accepted by the URI parser — verify.
        rl_nowhere = QgsRasterLayer(
            self.dbconn + ' sslmode=disable srid=3035 table="public"."raster_3035_tiled_no_overviews"' +
            'sql=', 'test', 'postgresraster')
        self.assertTrue(rl_nowhere.isValid())
        # Filtered layer: only tiles matching "category" = 'cat2'.
        rl = QgsRasterLayer(
            self.dbconn + ' sslmode=disable srid=3035 table="public"."raster_3035_tiled_no_overviews"' +
            'sql="category" = \'cat2\'', 'test', 'postgresraster')
        self.assertTrue(rl.isValid())
        self.assertTrue(not rl.extent().isEmpty())
        self.assertNotEqual(rl_nowhere.extent(), rl.extent())
        # This point falls outside the filtered extent: identify must return
        # None on the filtered layer but a value on the unfiltered one.
        self.assertIsNone(
            rl.dataProvider().identify(QgsPointXY(4080137.9, 2430687.9), QgsRaster.IdentifyFormatValue).results()[1])
        self.assertIsNotNone(rl_nowhere.dataProvider().identify(QgsPointXY(4080137.9, 2430687.9),
                                                                QgsRaster.IdentifyFormatValue).results()[1])
        self.assertAlmostEqual(
            rl.dataProvider().identify(rl.extent().center(), QgsRaster.IdentifyFormatValue).results()[1], 223.38, 2)
        self.assertTrue(compareWkt(rl_nowhere.extent().asWktPolygon(),
                                   'POLYGON((4080050 2430625, 4080200 2430625, 4080200 2430750, 4080050 2430750, 4080050 2430625))'))
        self.assertTrue(compareWkt(rl.extent().asWktPolygon(),
                                   'POLYGON((4080150 2430625, 4080200 2430625, 4080200 2430650, 4080150 2430650, 4080150 2430625))'))
        self.assertNotEqual(rl.extent(), rl_nowhere.extent())
        # Now check if setSubsetString updates the extent
        self.assertTrue(rl_nowhere.setSubsetString('"category" = \'cat2\''))
        self.assertEqual(rl.extent(), rl_nowhere.extent())
def testNoPk(self):
"""Read raster with no PK"""
rl = QgsRasterLayer(self.dbconn + ' sslmode=disable srid=3035 table="public"."raster_3035_tiled_no_pk"' +
'sql=', 'test', 'postgresraster')
self.assertTrue(rl.isValid())
def testCompositeKey(self):
"""Read raster with composite pks"""
rl = QgsRasterLayer(
self.dbconn + ' sslmode=disable srid=3035 table="public"."raster_3035_tiled_composite_pk"' +
'sql=', 'test', 'postgresraster')
self.assertTrue(rl.isValid())
data = rl.dataProvider().block(1, rl.extent(), 3, 3)
self.assertEqual(int(data.value(0, 0)), 142)
    @unittest.skip('Performance test is disabled in Travis environment')
    def testSpeed(self):
        """Compare speed with GDAL provider, this test was used during development"""
        # Local development connection; assumes user==password and a
        # pre-loaded 'basic_map_tiled' table in the qgis_test database.
        conn = "user={user} host=localhost port=5432 password={password} dbname={speed_db} ".format(
            user=os.environ.get('USER'),
            password=os.environ.get('USER'),
            speed_db='qgis_test'
        )
        table = 'basic_map_tiled'
        schema = 'public'

        def _speed_check(schema, table, width, height):
            # Times first and second block reads for both providers and
            # prints the results; no assertions beyond layer validity.
            print('-' * 80)
            print("Testing: {schema}.{table}".format(
                table=table, schema=schema))
            print('-' * 80)
            # GDAL
            start = time.time()
            rl = QgsRasterLayer(
                "PG: " + conn +
                "table={table} mode=2 schema={schema}".format(
                    table=table, schema=schema), 'gdal_layer',
                'gdal')
            self.assertTrue(rl.isValid())
            # Make is smaller than full extent
            extent = rl.extent().buffered(-rl.extent().width() * 0.2)
            checkpoint_1 = time.time()
            print("Tiled GDAL start time: {:.6f}".format(checkpoint_1 - start))
            rl.dataProvider().block(1, extent, width, height)
            checkpoint_2 = time.time()
            print("Tiled GDAL first block time: {:.6f}".format(
                checkpoint_2 - checkpoint_1))
            # rl.dataProvider().block(1, extent, width, height)
            checkpoint_3 = time.time()
            print("Tiled GDAL second block time: {:.6f}".format(
                checkpoint_3 - checkpoint_2))
            print("Total GDAL time: {:.6f}".format(checkpoint_3 - start))
            print('-' * 80)
            # PG native
            start = time.time()
            rl = QgsRasterLayer(conn + "table={table} schema={schema}".format(table=table, schema=schema), 'gdal_layer',
                                'postgresraster')
            self.assertTrue(rl.isValid())
            extent = rl.extent().buffered(-rl.extent().width() * 0.2)
            checkpoint_1 = time.time()
            print("Tiled PG start time: {:.6f}".format(checkpoint_1 - start))
            rl.dataProvider().block(1, extent, width, height)
            checkpoint_2 = time.time()
            print("Tiled PG first block time: {:.6f}".format(
                checkpoint_2 - checkpoint_1))
            rl.dataProvider().block(1, extent, width, height)
            checkpoint_3 = time.time()
            print("Tiled PG second block time: {:.6f}".format(
                checkpoint_3 - checkpoint_2))
            print("Total PG time: {:.6f}".format(checkpoint_3 - start))
            print('-' * 80)

        _speed_check(schema, table, 1000, 1000)
    def testOtherSchema(self):
        """Test that a layer in a different schema than public can be loaded
        See: GH #34823"""
        rl = QgsRasterLayer(
            self.dbconn + " sslmode=disable table={table} schema={schema}".format(
                table='cosmo_i5_snow', schema='idro'),
            'pg_layer', 'postgresraster')
        self.assertTrue(rl.isValid())
        # Extent check guards against the schema-qualified metadata lookup
        # regressing (values match the bug_34823 fixture).
        self.assertTrue(compareWkt(rl.extent().asWktPolygon(),
                                   'POLYGON((-64.79286766849691048 -77.26689086732433509, -62.18292922825105506 -77.26689086732433509, -62.18292922825105506 -74.83694818157819384, -64.79286766849691048 -74.83694818157819384, -64.79286766849691048 -77.26689086732433509))'))
def testUntiledMultipleRows(self):
"""Test multiple rasters (one per row)"""
rl = QgsRasterLayer(self.dbconn + " sslmode=disable table={table} schema={schema} sql=\"pk\" = 1".format(
table='raster_3035_untiled_multiple_rows', schema='public'), 'pg_layer', 'postgresraster')
self.assertTrue(rl.isValid())
block = rl.dataProvider().block(1, rl.extent(), 2, 2)
data = []
for i in range(2):
for j in range(2):
data.append(int(block.value(i, j)))
self.assertEqual(data, [136, 142, 145, 153])
rl = QgsRasterLayer(self.dbconn + " sslmode=disable table={table} schema={schema} sql=\"pk\" = 2".format(
table='raster_3035_untiled_multiple_rows', schema='public'), 'pg_layer', 'postgresraster')
self.assertTrue(rl.isValid())
block = rl.dataProvider().block(1, rl.extent(), 2, 2)
data = []
for i in range(2):
for j in range(2):
data.append(int(block.value(i, j)))
self.assertEqual(data, [136, 142, 161, 169])
    def testSetSubsetString(self):
        """Test setSubsetString

        Verifies that changing the subset string refreshes block data,
        band statistics and the renderer's contrast enhancement, and that
        an invalid filter is rejected without altering the data.
        """
        rl = QgsRasterLayer(self.dbconn + " sslmode=disable table={table} schema={schema} sql=\"pk\" = 2".format(
            table='raster_3035_untiled_multiple_rows', schema='public'), 'pg_layer', 'postgresraster')
        self.assertTrue(rl.isValid())
        block = rl.dataProvider().block(1, rl.extent(), 2, 2)
        data = []
        for i in range(2):
            for j in range(2):
                data.append(int(block.value(i, j)))
        self.assertEqual(data, [136, 142, 161, 169])
        stats = rl.dataProvider().bandStatistics(
            1, QgsRasterBandStats.Min | QgsRasterBandStats.Max, rl.extent())
        self.assertEqual(int(stats.minimumValue), 136)
        self.assertEqual(int(stats.maximumValue), 169)
        # Baseline renderer min/max before changing the filter.
        ce = rl.renderer().contrastEnhancement()
        min_max = int(ce.minimumValue()), int(ce.maximumValue())
        self.assertEqual(min_max, (136, 169))
        # Change filter:
        self.assertTrue(rl.setSubsetString('"pk" = 1'))
        block = rl.dataProvider().block(1, rl.extent(), 2, 2)
        data = []
        for i in range(2):
            for j in range(2):
                data.append(int(block.value(i, j)))
        self.assertEqual(data, [136, 142, 145, 153])
        # Check that we have new statistics
        stats = rl.dataProvider().bandStatistics(
            1, QgsRasterBandStats.Min | QgsRasterBandStats.Max, rl.extent())
        self.assertEqual(int(stats.minimumValue), 136)
        self.assertEqual(int(stats.maximumValue), 153)
        # Check that the renderer has been updated
        ce = rl.renderer().contrastEnhancement()
        new_min_max = int(ce.minimumValue()), int(ce.maximumValue())
        self.assertNotEqual(new_min_max, min_max)
        self.assertEqual(new_min_max, (136, 153))
        # Set invalid filter: must be rejected and leave the data unchanged.
        self.assertFalse(rl.setSubsetString('"pk_wrong" = 1'))
        block = rl.dataProvider().block(1, rl.extent(), 2, 2)
        data = []
        for i in range(2):
            for j in range(2):
                data.append(int(block.value(i, j)))
        self.assertEqual(data, [136, 142, 145, 153])
    def testTime(self):
        """Test time series and time subset string when default value is set"""

        def _test_block(rl, expected_block, expected_single):
            # Check a 2x2 block read plus a 1x1 (single pixel) read.
            self.assertTrue(rl.isValid())
            block = rl.dataProvider().block(1, rl.extent(), 2, 2)
            data = []
            for i in range(2):
                for j in range(2):
                    data.append(int(block.value(i, j)))
            self.assertEqual(data, expected_block)
            block = rl.dataProvider().block(1, rl.extent(), 1, 1)
            self.assertEqual(int(block.value(0, 0)), expected_single)

        # First check that setting different temporal default values we get different results
        rl = QgsRasterLayer(
            self.dbconn + " sslmode=disable table={table} schema={schema} temporalDefaultTime='2020-04-01T00:00:00' temporalFieldIndex='1'".format(
                table='raster_3035_untiled_multiple_rows', schema='public'), 'pg_layer', 'postgresraster')
        # Temporal filtering must not leak into the subset string.
        self.assertEqual(rl.subsetString(), "")
        _test_block(rl, [136, 142, 145, 153], 153)
        rl = QgsRasterLayer(
            self.dbconn + " sslmode=disable table={table} schema={schema} temporalDefaultTime='2020-04-05T00:00:00' temporalFieldIndex='1'".format(
                table='raster_3035_untiled_multiple_rows', schema='public'), 'pg_layer', 'postgresraster')
        self.assertEqual(rl.subsetString(), "")
        _test_block(rl, [136, 142, 161, 169], 169)
        # Check that manually setting a subsetString we get the same results
        rl = QgsRasterLayer(
            self.dbconn + " sslmode=disable table={table} schema={schema} sql=\"data\" = '2020-04-01'".format(
                table='raster_3035_untiled_multiple_rows', schema='public'), 'pg_layer', 'postgresraster')
        self.assertEqual(rl.subsetString(), '"data" = \'2020-04-01\'')
        _test_block(rl, [136, 142, 145, 153], 153)
        rl = QgsRasterLayer(
            self.dbconn + " sslmode=disable table={table} schema={schema} sql=\"data\" = '2020-04-05'".format(
                table='raster_3035_untiled_multiple_rows', schema='public'), 'pg_layer', 'postgresraster')
        self.assertEqual(rl.subsetString(), '"data" = \'2020-04-05\'')
        _test_block(rl, [136, 142, 161, 169], 169)
        # Now check if the varchar temporal field works the same
        rl = QgsRasterLayer(
            self.dbconn + " sslmode=disable table={table} schema={schema} temporalDefaultTime='2020-04-01T00:00:00' temporalFieldIndex='2'".format(
                table='raster_3035_untiled_multiple_rows', schema='public'), 'pg_layer', 'postgresraster')
        self.assertEqual(rl.subsetString(), '')
        _test_block(rl, [136, 142, 145, 153], 153)
        rl = QgsRasterLayer(
            self.dbconn + " sslmode=disable table={table} schema={schema} temporalDefaultTime='2020-04-05T00:00:00' temporalFieldIndex='2'".format(
                table='raster_3035_untiled_multiple_rows', schema='public'), 'pg_layer', 'postgresraster')
        self.assertEqual(rl.subsetString(), '')
        _test_block(rl, [136, 142, 161, 169], 169)
    def testMetadataEncodeDecode(self):
        """Round trip tests on URIs"""

        def _round_trip(uri):
            # decode -> encode -> decode must be a fixed point.
            decoded = md.decodeUri(uri)
            self.assertEqual(decoded, md.decodeUri(md.encodeUri(decoded)))

        uri = self.dbconn + \
            ' sslmode=disable key=\'rid\' srid=3035 table="public"."raster_tiled_3035" sql='
        md = QgsProviderRegistry.instance().providerMetadata('postgresraster')
        decoded = md.decodeUri(uri)
        self.assertEqual(decoded, {
            'key': 'rid',
            'schema': 'public',
            'service': 'qgis_test',
            'srid': '3035',
            'sslmode': QgsDataSourceUri.SslDisable,
            'table': 'raster_tiled_3035',
        })
        _round_trip(uri)
        # A richer URI exercising auth, temporal options, geometry column,
        # an embedded '=' in the password and a quoted username.
        uri = self.dbconn + \
            ' sslmode=prefer key=\'rid\' srid=3035 temporalFieldIndex=2 temporalDefaultTime=2020-03-02 ' + \
            'authcfg=afebeff username=\'my username\' password=\'my secret password=\' ' + \
            'enableTime=true table="public"."raster_tiled_3035" (rast) sql="a_field" != 1223223'
        _round_trip(uri)
        decoded = md.decodeUri(uri)
        self.assertEqual(decoded, {
            'authcfg': 'afebeff',
            'enableTime': 'true',
            'geometrycolumn': 'rast',
            'key': 'rid',
            'password': 'my secret password=',
            'schema': 'public',
            'service': 'qgis_test',
            'sql': '"a_field" != 1223223',
            'srid': '3035',
            'sslmode': QgsDataSourceUri.SslPrefer,
            'table': 'raster_tiled_3035',
            'temporalDefaultTime':
            '2020-03-02',
            'temporalFieldIndex': '2',
            'username': 'my username',
        })
def testInt16(self):
"""Test regression https://github.com/qgis/QGIS/issues/36689"""
rl = QgsRasterLayer(
self.dbconn + " sslmode=disable table={table} schema={schema}".format(
table='int16_regression_36689', schema='public'), 'pg_layer', 'postgresraster')
self.assertTrue(rl.isValid())
block = rl.dataProvider().block(1, rl.extent(), 6, 6)
data = []
for i in range(6):
for j in range(6):
data.append(int(block.value(i, j)))
self.assertEqual(data, [55, 52, 46, 39, 33, 30, 58, 54, 49, 45, 41, 37, 58, 54, 50,
47, 45, 43, 54, 51, 49, 47, 46, 44, 47, 47, 47, 47, 46, 45, 41, 43, 45, 48, 49, 46])
    def testNegativeScaleY(self):
        """Test regression https://github.com/qgis/QGIS/issues/37968
        Y is growing south
        """
        rl = QgsRasterLayer(
            self.dbconn + " sslmode=disable table={table} schema={schema}".format(
                table='bug_37968_dem_linear_cdn_extract', schema='public'), 'pg_layer', 'postgresraster')
        self.assertTrue(rl.isValid())
        # Extent compared with 1-unit tolerance (third compareWkt argument).
        self.assertTrue(compareWkt(rl.extent().asWktPolygon(
        ), 'POLYGON((-40953 170588, -40873 170588, -40873 170668, -40953 170668, -40953 170588))', 1))
        block = rl.dataProvider().block(1, rl.extent(), 6, 6)
        data = []
        for i in range(6):
            for j in range(6):
                data.append(int(block.value(i, j)))
        self.assertEqual(data, [52, 52, 52, 52, 44, 43, 52, 52, 52, 48, 44, 44, 49, 52, 49, 44, 44, 44, 43, 47, 46, 44, 44, 44, 42, 42, 43, 44, 44, 48, 42, 43, 43, 44, 44, 47])
    def testUntiledMosaicNoMetadata(self):
        """Test regression https://github.com/qgis/QGIS/issues/39017

        Two untiled rasters side by side, no raster_columns metadata:

        +-----------+------------------------------+
        |           |                              |
        |  rid = 1  |           rid = 2            |
        |           |                              |
        +-----------+------------------------------+
        """
        # Whole mosaic (no filter).
        rl = QgsRasterLayer(
            self.dbconn + " sslmode=disable table={table} schema={schema}".format(
                table='bug_39017_untiled_no_metadata', schema='public'), 'pg_layer', 'postgresraster')
        self.assertTrue(rl.isValid())
        self.assertTrue(compareWkt(rl.extent().asWktPolygon(
        ), 'POLYGON((47.061 40.976, 47.123 40.976, 47.123 41.000, 47.061 41.000, 47.061 40.976))', 0.01))
        # Each rid filter must yield only that raster's extent.
        rl1 = QgsRasterLayer(
            self.dbconn + " sslmode=disable table={table} schema={schema} sql=\"rid\"=1".format(
                table='bug_39017_untiled_no_metadata', schema='public'), 'pg_layer', 'postgresraster')
        self.assertTrue(rl1.isValid())
        self.assertTrue(compareWkt(rl1.extent().asWktPolygon(
        ), 'POLYGON((47.061 40.976, 47.070 40.976, 47.070 41.000, 47.061 41.000, 47.061 40.976))', 0.01))
        rl2 = QgsRasterLayer(
            self.dbconn + " sslmode=disable table={table} schema={schema} sql=\"rid\"=2".format(
                table='bug_39017_untiled_no_metadata', schema='public'), 'pg_layer', 'postgresraster')
        self.assertTrue(rl2.isValid())
        self.assertTrue(compareWkt(rl2.extent().asWktPolygon(
        ), 'POLYGON((47.061 40.976, 47.123 40.976, 47.123 41.000, 47.070 41.000, 47.070 40.976))', 0.01))
        extent_1 = rl1.extent()
        extent_2 = rl2.extent()

        def _6x6_block_data(layer, extent):
            # Flattened 6x6 block read over the given extent.
            block = layer.dataProvider().block(1, extent, 6, 6)
            data = []
            for i in range(6):
                for j in range(6):
                    data.append(int(block.value(i, j)))
            return data

        # Reading the mosaic over each sub-extent must match reading the
        # corresponding single-raster layer.
        rl_r1 = _6x6_block_data(rl, extent_1)
        r1_r1 = _6x6_block_data(rl1, extent_1)
        self.assertEqual(rl_r1, r1_r1)
        rl_r2 = _6x6_block_data(rl, extent_2)
        r2_r2 = _6x6_block_data(rl2, extent_2)
        self.assertEqual(rl_r2, r2_r2)
# Standard unittest entry point (run via ctest -V -R PyQgsPostgresProvider).
if __name__ == '__main__':
    unittest.main()
|
unknown
|
codeparrot/codeparrot-clean
| ||
#*******************************************************************************
# Copyright (c) 2015 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#******************************************************************************/
# Example script: query Cloudant databases through Spark SQL temp tables.
# NOTE: Python 2 syntax (print statements). Requires the com.cloudant.spark
# connector on the Spark classpath; replace ACCOUNT/USERNAME/PASSWORD with
# real credentials before running.
import pprint
from pyspark.sql import SparkSession

# Build (or reuse) a SparkSession carrying the Cloudant connection settings
# read by the com.cloudant.spark data source.
spark = SparkSession\
    .builder\
    .appName("Cloudant Spark SQL Example in Python using temp tables")\
    .config("cloudant.host","ACCOUNT.cloudant.com")\
    .config("cloudant.username", "USERNAME")\
    .config("cloudant.password","PASSWORD")\
    .getOrCreate()

# ***1. Loading temp table from Cloudant db
spark.sql(" CREATE TEMPORARY TABLE airportTable USING com.cloudant.spark OPTIONS ( database 'n_airportcodemapping')")
airportData = spark.sql("SELECT _id, airportName FROM airportTable WHERE _id >= 'CAA' AND _id <= 'GAA' ORDER BY _id")
airportData.printSchema()
print 'Total # of rows in airportData: ' + str(airportData.count())
for code in airportData.collect():
    print code._id

# ***2. Loading temp table from Cloudant search index
print 'About to test com.cloudant.spark for flight with index'
spark.sql(" CREATE TEMPORARY TABLE flightTable1 USING com.cloudant.spark OPTIONS ( database 'n_flight', index '_design/view/_search/n_flights')")
flightData = spark.sql("SELECT flightSegmentId, scheduledDepartureTime FROM flightTable1 WHERE flightSegmentId >'AA9' AND flightSegmentId<'AA95'")
flightData.printSchema()
for code in flightData.collect():
    print 'Flight {0} on {1}'.format(code.flightSegmentId, code.scheduledDepartureTime)
|
unknown
|
codeparrot/codeparrot-clean
| ||
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2018, Simon Dodsley (simon@purestorage.com)
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
# Module documentation constants. Fixes: typos 'perofrming' -> 'performing',
# 'exisitng' -> 'existing', and broken grammar in the nameservers description.
ANSIBLE_METADATA = {'metadata_version': '1.1',
                    'status': ['preview'],
                    'supported_by': 'community'}

DOCUMENTATION = r'''
---
module: purefa_dns
version_added: '2.8'
short_description: Configure FlashArray DNS settings
description:
- Set or erase configuration for the DNS settings.
- Nameservers provided will overwrite any existing nameservers.
author:
- Simon Dodsley (@sdodsley)
options:
  state:
    description:
    - Set or delete directory service configuration
    default: present
    choices: [ absent, present ]
  domain:
    description:
    - Domain suffix to be appended when performing DNS lookups.
  nameservers:
    description:
    - List of up to 3 unique DNS server IP addresses. These can be
      IPv4 or IPv6 - no validation of the addresses is performed.
extends_documentation_fragment:
- purestorage.fa
'''

EXAMPLES = r'''
- name: Delete existing DNS settings
  purefa_dns:
    state: absent
    fa_url: 10.10.10.2
    api_token: e31060a7-21fc-e277-6240-25983c6c4592

- name: Set DNS settings
  purefa_dns:
    domain: purestorage.com
    nameservers:
      - 8.8.8.8
      - 8.8.4.4
    fa_url: 10.10.10.2
    api_token: e31060a7-21fc-e277-6240-25983c6c4592
'''

RETURN = r'''
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.pure import get_system, purefa_argument_spec
def remove(duplicate):
    """Return the items of *duplicate* with duplicates removed, keeping
    first-seen order.

    :param duplicate: iterable of hashable items (nameserver strings here)
    :returns: list of unique items in original order
    """
    final_list = []
    # Track seen items in a set: O(1) membership instead of the original
    # O(n) list scan per element (O(n^2) overall).
    seen = set()
    for num in duplicate:
        if num not in seen:
            seen.add(num)
            final_list.append(num)
    return final_list
def delete_dns(module, array):
    """Delete DNS settings

    Clears domain and nameservers on the array; no-op (changed=False) when
    the configuration is already empty. Always exits the module.
    """
    changed = False
    current_dns = array.get_dns()
    # Empty domain plus a single empty nameserver entry means "unset".
    if current_dns['domain'] == '' and current_dns['nameservers'] == ['']:
        module.exit_json(changed=changed)
    else:
        try:
            array.set_dns(domain='', nameservers=[])
            changed = True
        except Exception:
            # Bug fix: typo in user-facing message ('settigs' -> 'settings').
            module.fail_json(msg='Delete DNS settings failed')
        module.exit_json(changed=changed)
def create_dns(module, array):
    """Set DNS settings"""
    changed = False
    current = array.get_dns()
    # Compare domain directly and nameservers order-insensitively.
    domain_differs = current['domain'] != module.params['domain']
    servers_differ = sorted(module.params['nameservers']) != sorted(current['nameservers'])
    if domain_differs or servers_differ:
        try:
            array.set_dns(domain=module.params['domain'],
                          nameservers=module.params['nameservers'][0:3])
            changed = True
        except Exception:
            module.fail_json(msg='Set DNS settings failed: Check configuration')
    module.exit_json(changed=changed)
def main():
    """Module entry point: parse arguments and apply the requested state."""
    argument_spec = purefa_argument_spec()
    argument_spec.update(dict(
        state=dict(type='str', default='present', choices=['absent', 'present']),
        domain=dict(type='str'),
        nameservers=dict(type='list'),
    ))
    # domain and nameservers are mandatory when state=present.
    required_if = [('state', 'present', ['domain', 'nameservers'])]
    module = AnsibleModule(argument_spec,
                           required_if=required_if,
                           supports_check_mode=False)
    state = module.params['state']
    array = get_system(module)
    if state == 'absent':
        delete_dns(module, array)
    elif state == 'present':
        # De-duplicate nameservers (order preserved) before applying.
        module.params['nameservers'] = remove(module.params['nameservers'])
        create_dns(module, array)
    else:
        module.exit_json(changed=False)
# Standard Ansible module entry point.
if __name__ == '__main__':
    main()
|
unknown
|
codeparrot/codeparrot-clean
| ||
#!/usr/bin/env python
#
# Copyright (c) 2016 Intel Corporation.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of works must retain the original copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the original copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of Intel Corporation nor the names of its contributors
# may be used to endorse or promote products derived from this work without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY INTEL CORPORATION "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL INTEL CORPORATION BE LIABLE FOR ANY DIRECT,
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors:
# Lin, Wanming <wanming.lin@intel.com>
import unittest
import os
import sys
import commands
import comm
class TestWebAppInstall(unittest.TestCase):
    """Smoke test: install a sample web application on the target device."""

    def test_install(self):
        # comm is a project-local helper module (device/app utilities).
        comm.setUp()
        app_name = "helloworld"
        # Package naming convention: com.example.<lowercased app name>.
        pkg_name = "com.example." + app_name.lower()
        # Installs the app and reports assertions through `self`.
        comm.app_install(app_name, pkg_name, self)
# Standard unittest entry point.
if __name__ == '__main__':
    unittest.main()
|
unknown
|
codeparrot/codeparrot-clean
| ||
// Copyright 2014 Manu Martinez-Almeida. All rights reserved.
// Use of this source code is governed by a MIT style
// license that can be found in the LICENSE file.
package gin
import (
"bytes"
"context"
"errors"
"fmt"
"html/template"
"io"
"io/fs"
"mime/multipart"
"net"
"net/http"
"net/http/httptest"
"net/url"
"os"
"path/filepath"
"reflect"
"strconv"
"strings"
"sync"
"testing"
"time"
"github.com/gin-contrib/sse"
"github.com/gin-gonic/gin/binding"
"github.com/gin-gonic/gin/codec/json"
testdata "github.com/gin-gonic/gin/testdata/protoexample"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"go.mongodb.org/mongo-driver/bson"
"google.golang.org/protobuf/proto"
)
// Compile-time assertion that *Context implements context.Context.
var _ context.Context = (*Context)(nil)

// errTestRender is a sentinel error used by render-failure tests.
var errTestRender = errors.New("TestRender")

// Unit tests TODO
// func (c *Context) File(filepath string) {
// func (c *Context) Negotiate(code int, config Negotiate) {
// BAD case: func (c *Context) Render(code int, render render.Render, obj ...any) {
// test that information is not leaked when reusing Contexts (using the Pool)
// createMultipartRequest builds a POST request whose body is a multipart
// form containing the fixed set of fields the binding tests expect.
// Any write error aborts the test process via must().
func createMultipartRequest() *http.Request {
	boundary := "--testboundary"
	body := new(bytes.Buffer)
	mw := multipart.NewWriter(body)
	defer mw.Close()
	must(mw.SetBoundary(boundary))
	must(mw.WriteField("foo", "bar"))
	must(mw.WriteField("bar", "10"))
	must(mw.WriteField("bar", "foo2"))
	must(mw.WriteField("array", "first"))
	must(mw.WriteField("array", "second"))
	must(mw.WriteField("id", ""))
	must(mw.WriteField("time_local", "31/12/2016 14:55"))
	must(mw.WriteField("time_utc", "31/12/2016 14:55"))
	must(mw.WriteField("time_location", "31/12/2016 14:55"))
	must(mw.WriteField("names[a]", "thinkerou"))
	must(mw.WriteField("names[b]", "tianou"))
	req, err := http.NewRequest(http.MethodPost, "/", body)
	must(err)
	req.Header.Set("Content-Type", MIMEMultipartPOSTForm+"; boundary="+boundary)
	return req
}
// must panics when err is non-nil; test-only helper that keeps request
// construction terse.
func must(err error) {
	if err == nil {
		return
	}
	panic(err.Error())
}
// TestContextFile tests the Context.File() method
func TestContextFile(t *testing.T) {
	// Test serving an existing file
	t.Run("serve existing file", func(t *testing.T) {
		// Uses a pre-existing fixture file under testdata/ (nothing is
		// created here, despite what the original comment suggested).
		testFile := "testdata/test_file.txt"
		w := httptest.NewRecorder()
		c, _ := CreateTestContext(w)
		c.Request = httptest.NewRequest(http.MethodGet, "/test", nil)
		c.File(testFile)
		assert.Equal(t, http.StatusOK, w.Code)
		assert.Contains(t, w.Body.String(), "This is a test file")
		assert.Equal(t, "text/plain; charset=utf-8", w.Header().Get("Content-Type"))
	})
	// Test serving a non-existent file
	t.Run("serve non-existent file", func(t *testing.T) {
		w := httptest.NewRecorder()
		c, _ := CreateTestContext(w)
		c.Request = httptest.NewRequest(http.MethodGet, "/test", nil)
		c.File("non_existent_file.txt")
		assert.Equal(t, http.StatusNotFound, w.Code)
	})
	// Test serving a directory (should return 200 with directory listing or 403 Forbidden)
	t.Run("serve directory", func(t *testing.T) {
		w := httptest.NewRecorder()
		c, _ := CreateTestContext(w)
		c.Request = httptest.NewRequest(http.MethodGet, "/test", nil)
		c.File(".")
		// Directory serving can return either 200 (with listing) or 403 (forbidden)
		assert.True(t, w.Code == http.StatusOK || w.Code == http.StatusForbidden)
	})
	// Test with HEAD request
	t.Run("HEAD request", func(t *testing.T) {
		testFile := "testdata/test_file.txt"
		w := httptest.NewRecorder()
		c, _ := CreateTestContext(w)
		c.Request = httptest.NewRequest(http.MethodHead, "/test", nil)
		c.File(testFile)
		assert.Equal(t, http.StatusOK, w.Code)
		assert.Empty(t, w.Body.String()) // HEAD request should not return body
		assert.Equal(t, "text/plain; charset=utf-8", w.Header().Get("Content-Type"))
	})
	// Test with Range request
	t.Run("Range request", func(t *testing.T) {
		testFile := "testdata/test_file.txt"
		w := httptest.NewRecorder()
		c, _ := CreateTestContext(w)
		c.Request = httptest.NewRequest(http.MethodGet, "/test", nil)
		c.Request.Header.Set("Range", "bytes=0-10")
		c.File(testFile)
		assert.Equal(t, http.StatusPartialContent, w.Code)
		assert.Equal(t, "bytes", w.Header().Get("Accept-Ranges"))
		assert.Contains(t, w.Header().Get("Content-Range"), "bytes 0-10")
	})
}
// TestContextFormFile checks that a single uploaded form file can be
// retrieved via Context.FormFile and persisted via SaveUploadedFile.
func TestContextFormFile(t *testing.T) {
	// Build a multipart body with one file field named "file".
	buf := new(bytes.Buffer)
	mw := multipart.NewWriter(buf)
	w, err := mw.CreateFormFile("file", "test")
	require.NoError(t, err)
	_, err = w.Write([]byte("test"))
	require.NoError(t, err)
	mw.Close()
	c, _ := CreateTestContext(httptest.NewRecorder())
	c.Request, _ = http.NewRequest(http.MethodPost, "/", buf)
	c.Request.Header.Set("Content-Type", mw.FormDataContentType())
	f, err := c.FormFile("file")
	require.NoError(t, err)
	assert.Equal(t, "test", f.Filename)
	// Writes the upload to ./test in the working directory.
	require.NoError(t, c.SaveUploadedFile(f, "test"))
}
// TestContextFormFileFailed checks that FormFile returns an error (and a
// nil header) when the request carries no body to parse.
func TestContextFormFileFailed(t *testing.T) {
	buf := new(bytes.Buffer)
	mw := multipart.NewWriter(buf)
	mw.Close()
	c, _ := CreateTestContext(httptest.NewRecorder())
	// nil body: multipart parsing must fail.
	c.Request, _ = http.NewRequest(http.MethodPost, "/", nil)
	c.Request.Header.Set("Content-Type", mw.FormDataContentType())
	c.engine.MaxMultipartMemory = 8 << 20
	f, err := c.FormFile("file")
	require.Error(t, err)
	assert.Nil(t, f)
}
// TestContextMultipartForm verifies MultipartForm parses both value fields and
// file parts, and that a parsed file can be saved with SaveUploadedFile.
func TestContextMultipartForm(t *testing.T) {
	buf := new(bytes.Buffer)
	mw := multipart.NewWriter(buf)
	require.NoError(t, mw.WriteField("foo", "bar"))
	w, err := mw.CreateFormFile("file", "test")
	require.NoError(t, err)
	_, err = w.Write([]byte("test"))
	require.NoError(t, err)
	mw.Close()
	c, _ := CreateTestContext(httptest.NewRecorder())
	c.Request, _ = http.NewRequest(http.MethodPost, "/", buf)
	c.Request.Header.Set("Content-Type", mw.FormDataContentType())
	f, err := c.MultipartForm()
	require.NoError(t, err)
	assert.NotNil(t, f)
	require.NoError(t, c.SaveUploadedFile(f.File["file"][0], "test"))
	// Remove the file written by SaveUploadedFile so the test leaves no
	// artifact in the working directory.
	t.Cleanup(func() {
		assert.NoError(t, os.Remove("test"))
	})
}
// TestSaveUploadedOpenFailed ensures SaveUploadedFile fails when handed a
// FileHeader that was never backed by an actual multipart part.
func TestSaveUploadedOpenFailed(t *testing.T) {
	body := new(bytes.Buffer)
	writer := multipart.NewWriter(body)
	writer.Close()
	c, _ := CreateTestContext(httptest.NewRecorder())
	c.Request, _ = http.NewRequest(http.MethodPost, "/", body)
	c.Request.Header.Set("Content-Type", writer.FormDataContentType())
	// A bare header with no underlying content cannot be opened.
	header := &multipart.FileHeader{Filename: "file"}
	require.Error(t, c.SaveUploadedFile(header, "test"))
}
// TestSaveUploadedCreateFailed ensures SaveUploadedFile surfaces the error
// when the destination path cannot be created (here: the root directory).
func TestSaveUploadedCreateFailed(t *testing.T) {
	body := new(bytes.Buffer)
	writer := multipart.NewWriter(body)
	part, err := writer.CreateFormFile("file", "test")
	require.NoError(t, err)
	_, err = part.Write([]byte("test"))
	require.NoError(t, err)
	writer.Close()
	c, _ := CreateTestContext(httptest.NewRecorder())
	c.Request, _ = http.NewRequest(http.MethodPost, "/", body)
	c.Request.Header.Set("Content-Type", writer.FormDataContentType())
	fh, err := c.FormFile("file")
	require.NoError(t, err)
	assert.Equal(t, "test", fh.Filename)
	require.Error(t, c.SaveUploadedFile(fh, "/"))
}
// TestSaveUploadedFileWithPermission uploads a file and saves it with an
// explicit fs.FileMode, then checks the mode of the destination's parent
// directory.
func TestSaveUploadedFileWithPermission(t *testing.T) {
	buf := new(bytes.Buffer)
	mw := multipart.NewWriter(buf)
	w, err := mw.CreateFormFile("file", "permission_test")
	require.NoError(t, err)
	_, err = w.Write([]byte("permission_test"))
	require.NoError(t, err)
	mw.Close()
	c, _ := CreateTestContext(httptest.NewRecorder())
	c.Request, _ = http.NewRequest(http.MethodPost, "/", buf)
	c.Request.Header.Set("Content-Type", mw.FormDataContentType())
	f, err := c.FormFile("file")
	require.NoError(t, err)
	assert.Equal(t, "permission_test", f.Filename)
	var mode fs.FileMode = 0o755
	require.NoError(t, c.SaveUploadedFile(f, "permission_test", mode))
	t.Cleanup(func() {
		assert.NoError(t, os.Remove("permission_test"))
	})
	// NOTE(review): filepath.Dir("permission_test") is ".", so this asserts the
	// mode of the current working directory, not of the saved file — the test
	// presumably relies on SaveUploadedFile applying mode to the destination's
	// directory and on cwd already being 0o755. Confirm against the
	// SaveUploadedFile implementation; this is fragile across environments.
	info, err := os.Stat(filepath.Dir("permission_test"))
	require.NoError(t, err)
	assert.Equal(t, info.Mode().Perm(), mode)
}
// TestSaveUploadedFileWithPermissionFailed saves into a new subdirectory with
// a mode lacking the execute bit (0o644); creating the file inside such a
// directory must fail, so SaveUploadedFile is expected to return an error.
func TestSaveUploadedFileWithPermissionFailed(t *testing.T) {
	buf := new(bytes.Buffer)
	mw := multipart.NewWriter(buf)
	w, err := mw.CreateFormFile("file", "permission_test")
	require.NoError(t, err)
	_, err = w.Write([]byte("permission_test"))
	require.NoError(t, err)
	mw.Close()
	c, _ := CreateTestContext(httptest.NewRecorder())
	c.Request, _ = http.NewRequest(http.MethodPost, "/", buf)
	c.Request.Header.Set("Content-Type", mw.FormDataContentType())
	f, err := c.FormFile("file")
	require.NoError(t, err)
	assert.Equal(t, "permission_test", f.Filename)
	var mode fs.FileMode = 0o644
	require.Error(t, c.SaveUploadedFile(f, "test/permission_test", mode))
	// SaveUploadedFile may have created the intermediate "test" directory
	// before failing; remove it so the test leaves no artifact behind.
	// os.RemoveAll returns nil if the path does not exist.
	t.Cleanup(func() {
		assert.NoError(t, os.RemoveAll("test"))
	})
}
// TestContextReset verifies that reset() returns a pooled Context to a
// pristine state: errors, keys, params, accepted formats and the handler
// index are cleared, while the engine reference and writer memory survive.
func TestContextReset(t *testing.T) {
	router := New()
	c := router.allocateContext(0)
	assert.Equal(t, c.engine, router)
	// Dirty the context the way a finished request would.
	c.index = 2
	c.Writer = &responseWriter{ResponseWriter: httptest.NewRecorder()}
	c.Params = Params{Param{}}
	c.Error(errors.New("test")) //nolint: errcheck
	c.Set("foo", "bar")
	c.reset()
	// Everything request-scoped must be gone.
	assert.False(t, c.IsAborted())
	assert.Nil(t, c.Keys)
	assert.Nil(t, c.Accepted)
	assert.Empty(t, c.Errors)
	assert.Empty(t, c.Errors.Errors())
	assert.Empty(t, c.Errors.ByType(ErrorTypeAny))
	assert.Empty(t, c.Params)
	// index is -1 so the first Next() call lands on handler 0.
	assert.EqualValues(t, -1, c.index)
	// The writer is re-pointed at the context's own writermem.
	assert.Equal(t, c.Writer.(*responseWriter), &c.writermem)
}
// TestContextHandlers exercises HandlersChain.Last for nil, empty and
// populated chains.
func TestContextHandlers(t *testing.T) {
	c, _ := CreateTestContext(httptest.NewRecorder())
	assert.Nil(t, c.handlers)
	assert.Nil(t, c.handlers.Last())
	c.handlers = HandlersChain{}
	assert.NotNil(t, c.handlers)
	assert.Nil(t, c.handlers.Last())
	first := func(c *Context) {}
	second := func(c *Context) {}
	c.handlers = HandlersChain{first}
	compareFunc(t, first, c.handlers.Last())
	c.handlers = HandlersChain{first, second}
	compareFunc(t, second, c.handlers.Last())
}
// TestContextSetGet tests that a parameter is set correctly on the
// current context and can be retrieved using Get.
func TestContextSetGet(t *testing.T) {
	c, _ := CreateTestContext(httptest.NewRecorder())
	c.Set("foo", "bar")
	got, ok := c.Get("foo")
	assert.Equal(t, "bar", got)
	assert.True(t, ok)
	got, ok = c.Get("foo2")
	assert.Nil(t, got)
	assert.False(t, ok)
	assert.Equal(t, "bar", c.MustGet("foo"))
	assert.Panicsf(t, func() { c.MustGet("no_exist") }, "key no_exist does not exist")
}
// TestContextSetGetAnyKey verifies Set/Get work with arbitrary comparable key
// types (integers, floats, structs, pointers), not just strings.
func TestContextSetGetAnyKey(t *testing.T) {
	c, _ := CreateTestContext(httptest.NewRecorder())
	type key struct{}
	keys := []any{
		1,
		int32(1),
		int64(1),
		uint(1),
		float32(1),
		key{},
		&key{},
	}
	for _, k := range keys {
		t.Run(reflect.TypeOf(k).String(), func(t *testing.T) {
			c.Set(k, 1)
			got, ok := c.Get(k)
			assert.True(t, ok)
			assert.Equal(t, 1, got)
		})
	}
}
// TestContextSetGetPanicsWhenKeyNotComparable verifies Set panics for every
// non-comparable key kind (slice, func, channel). Each case gets its own
// assert.Panics: inside a single closure only the first Set would execute —
// the panic would skip the remaining calls, leaving those cases untested.
func TestContextSetGetPanicsWhenKeyNotComparable(t *testing.T) {
	c, _ := CreateTestContext(httptest.NewRecorder())
	assert.Panics(t, func() { c.Set([]int{1}, 1) })
	assert.Panics(t, func() { c.Set(func() {}, 1) })
	assert.Panics(t, func() { c.Set(make(chan int), 1) })
}
// TestContextSetGetValues round-trips several concrete types through Set and
// asserts MustGet returns each with its dynamic type intact.
func TestContextSetGetValues(t *testing.T) {
	c, _ := CreateTestContext(httptest.NewRecorder())
	var boxed any = 1
	c.Set("string", "this is a string")
	c.Set("int32", int32(-42))
	c.Set("int64", int64(42424242424242))
	c.Set("uint64", uint64(42))
	c.Set("float32", float32(4.2))
	c.Set("float64", 4.2)
	c.Set("intInterface", boxed)
	assert.Exactly(t, "this is a string", c.MustGet("string").(string))
	assert.Exactly(t, int32(-42), c.MustGet("int32").(int32))
	assert.Exactly(t, int64(42424242424242), c.MustGet("int64").(int64))
	assert.Exactly(t, uint64(42), c.MustGet("uint64").(uint64))
	assert.InDelta(t, float32(4.2), c.MustGet("float32").(float32), 0.01)
	assert.InDelta(t, 4.2, c.MustGet("float64").(float64), 0.01)
	assert.Exactly(t, 1, c.MustGet("intInterface").(int))
}
// TestContextGetString round-trips a string through Set/GetString.
func TestContextGetString(t *testing.T) {
	c, _ := CreateTestContext(httptest.NewRecorder())
	const want = "this is a string"
	c.Set("string", want)
	assert.Equal(t, want, c.GetString("string"))
}
// TestContextSetGetBool round-trips a bool through Set/GetBool.
func TestContextSetGetBool(t *testing.T) {
	c, _ := CreateTestContext(httptest.NewRecorder())
	c.Set("bool", true)
	assert.True(t, c.GetBool("bool"))
}
// TestSetGetDelete checks that Delete removes a previously stored key so a
// subsequent Get reports it missing.
func TestSetGetDelete(t *testing.T) {
	c, _ := CreateTestContext(httptest.NewRecorder())
	const k, v = "example-key", "example-value"
	c.Set(k, v)
	got, exists := c.Get(k)
	assert.True(t, exists)
	assert.Equal(t, got, v)
	c.Delete(k)
	_, exists = c.Get(k)
	assert.False(t, exists)
}
// TestContextGetInt round-trips an int through Set/GetInt.
func TestContextGetInt(t *testing.T) {
	c, _ := CreateTestContext(httptest.NewRecorder())
	const want = 1
	c.Set("int", want)
	assert.Equal(t, want, c.GetInt("int"))
}
// TestContextGetInt8 round-trips the maximum int8 through Set/GetInt8.
func TestContextGetInt8(t *testing.T) {
	c, _ := CreateTestContext(httptest.NewRecorder())
	want := int8(0x7F)
	c.Set("int8", want)
	assert.Equal(t, want, c.GetInt8("int8"))
}
// TestContextGetInt16 round-trips the maximum int16 through Set/GetInt16.
func TestContextGetInt16(t *testing.T) {
	c, _ := CreateTestContext(httptest.NewRecorder())
	want := int16(0x7FFF)
	c.Set("int16", want)
	assert.Equal(t, want, c.GetInt16("int16"))
}
// TestContextGetInt32 round-trips the maximum int32 through Set/GetInt32.
func TestContextGetInt32(t *testing.T) {
	c, _ := CreateTestContext(httptest.NewRecorder())
	want := int32(0x7FFFFFFF)
	c.Set("int32", want)
	assert.Equal(t, want, c.GetInt32("int32"))
}
// TestContextGetInt64 round-trips an int64 through Set/GetInt64.
func TestContextGetInt64(t *testing.T) {
	c, _ := CreateTestContext(httptest.NewRecorder())
	want := int64(42424242424242)
	c.Set("int64", want)
	assert.Equal(t, want, c.GetInt64("int64"))
}
// TestContextGetUint round-trips a uint through Set/GetUint.
func TestContextGetUint(t *testing.T) {
	c, _ := CreateTestContext(httptest.NewRecorder())
	want := uint(1)
	c.Set("uint", want)
	assert.Equal(t, want, c.GetUint("uint"))
}
// TestContextGetUint8 round-trips the maximum uint8 through Set/GetUint8.
func TestContextGetUint8(t *testing.T) {
	c, _ := CreateTestContext(httptest.NewRecorder())
	want := uint8(0xFF)
	c.Set("uint8", want)
	assert.Equal(t, want, c.GetUint8("uint8"))
}
// TestContextGetUint16 round-trips the maximum uint16 through Set/GetUint16.
func TestContextGetUint16(t *testing.T) {
	c, _ := CreateTestContext(httptest.NewRecorder())
	want := uint16(0xFFFF)
	c.Set("uint16", want)
	assert.Equal(t, want, c.GetUint16("uint16"))
}
// TestContextGetUint32 round-trips the maximum uint32 through Set/GetUint32.
func TestContextGetUint32(t *testing.T) {
	c, _ := CreateTestContext(httptest.NewRecorder())
	want := uint32(0xFFFFFFFF)
	c.Set("uint32", want)
	assert.Equal(t, want, c.GetUint32("uint32"))
}
// TestContextGetUint64 round-trips the maximum uint64 through Set/GetUint64.
func TestContextGetUint64(t *testing.T) {
	c, _ := CreateTestContext(httptest.NewRecorder())
	want := uint64(18446744073709551615)
	c.Set("uint64", want)
	assert.Equal(t, want, c.GetUint64("uint64"))
}
// TestContextGetFloat32 round-trips a float32 through Set/GetFloat32,
// comparing within a small delta.
func TestContextGetFloat32(t *testing.T) {
	c, _ := CreateTestContext(httptest.NewRecorder())
	want := float32(3.14)
	c.Set("float32", want)
	assert.InDelta(t, want, c.GetFloat32("float32"), 0.01)
}
// TestContextGetFloat64 round-trips a float64 through Set/GetFloat64,
// comparing within a small delta.
func TestContextGetFloat64(t *testing.T) {
	c, _ := CreateTestContext(httptest.NewRecorder())
	want := 4.2
	c.Set("float64", want)
	assert.InDelta(t, want, c.GetFloat64("float64"), 0.01)
}
// TestContextGetTime round-trips a time.Time through Set/GetTime.
func TestContextGetTime(t *testing.T) {
	c, _ := CreateTestContext(httptest.NewRecorder())
	want, _ := time.Parse("1/2/2006 15:04:05", "01/01/2017 12:00:00")
	c.Set("time", want)
	assert.Equal(t, want, c.GetTime("time"))
}
// TestContextGetDuration round-trips a time.Duration through Set/GetDuration.
func TestContextGetDuration(t *testing.T) {
	c, _ := CreateTestContext(httptest.NewRecorder())
	want := time.Second
	c.Set("duration", want)
	assert.Equal(t, want, c.GetDuration("duration"))
}
// TestContextGetError round-trips an error value through Set/GetError.
func TestContextGetError(t *testing.T) {
	c, _ := CreateTestContext(httptest.NewRecorder())
	want := errors.New("test error")
	c.Set("error", want)
	assert.Equal(t, want, c.GetError("error"))
}
// TestContextGetIntSlice round-trips an []int through Set/GetIntSlice.
func TestContextGetIntSlice(t *testing.T) {
	c, _ := CreateTestContext(httptest.NewRecorder())
	want := []int{1, 2}
	c.Set("int-slice", want)
	assert.Equal(t, want, c.GetIntSlice("int-slice"))
}
// TestContextGetInt8Slice round-trips an []int8 through Set/GetInt8Slice.
func TestContextGetInt8Slice(t *testing.T) {
	c, _ := CreateTestContext(httptest.NewRecorder())
	want := []int8{1, 2}
	c.Set("int8-slice", want)
	assert.Equal(t, want, c.GetInt8Slice("int8-slice"))
}
// TestContextGetInt16Slice round-trips an []int16 through Set/GetInt16Slice.
func TestContextGetInt16Slice(t *testing.T) {
	c, _ := CreateTestContext(httptest.NewRecorder())
	want := []int16{1, 2}
	c.Set("int16-slice", want)
	assert.Equal(t, want, c.GetInt16Slice("int16-slice"))
}
// TestContextGetInt32Slice round-trips an []int32 through Set/GetInt32Slice.
func TestContextGetInt32Slice(t *testing.T) {
	c, _ := CreateTestContext(httptest.NewRecorder())
	want := []int32{1, 2}
	c.Set("int32-slice", want)
	assert.Equal(t, want, c.GetInt32Slice("int32-slice"))
}
// TestContextGetInt64Slice round-trips an []int64 through Set/GetInt64Slice.
func TestContextGetInt64Slice(t *testing.T) {
	c, _ := CreateTestContext(httptest.NewRecorder())
	want := []int64{1, 2}
	c.Set("int64-slice", want)
	assert.Equal(t, want, c.GetInt64Slice("int64-slice"))
}
// TestContextGetUintSlice round-trips a []uint through Set/GetUintSlice.
func TestContextGetUintSlice(t *testing.T) {
	c, _ := CreateTestContext(httptest.NewRecorder())
	want := []uint{1, 2}
	c.Set("uint-slice", want)
	assert.Equal(t, want, c.GetUintSlice("uint-slice"))
}
// TestContextGetUint8Slice round-trips a []uint8 through Set/GetUint8Slice.
func TestContextGetUint8Slice(t *testing.T) {
	c, _ := CreateTestContext(httptest.NewRecorder())
	want := []uint8{1, 2}
	c.Set("uint8-slice", want)
	assert.Equal(t, want, c.GetUint8Slice("uint8-slice"))
}
// TestContextGetUint16Slice round-trips a []uint16 through Set/GetUint16Slice.
func TestContextGetUint16Slice(t *testing.T) {
	c, _ := CreateTestContext(httptest.NewRecorder())
	want := []uint16{1, 2}
	c.Set("uint16-slice", want)
	assert.Equal(t, want, c.GetUint16Slice("uint16-slice"))
}
// TestContextGetUint32Slice round-trips a []uint32 through Set/GetUint32Slice.
func TestContextGetUint32Slice(t *testing.T) {
	c, _ := CreateTestContext(httptest.NewRecorder())
	want := []uint32{1, 2}
	c.Set("uint32-slice", want)
	assert.Equal(t, want, c.GetUint32Slice("uint32-slice"))
}
// TestContextGetUint64Slice round-trips a []uint64 through Set/GetUint64Slice.
func TestContextGetUint64Slice(t *testing.T) {
	c, _ := CreateTestContext(httptest.NewRecorder())
	want := []uint64{1, 2}
	c.Set("uint64-slice", want)
	assert.Equal(t, want, c.GetUint64Slice("uint64-slice"))
}
// TestContextGetFloat32Slice round-trips a []float32 through Set/GetFloat32Slice.
func TestContextGetFloat32Slice(t *testing.T) {
	c, _ := CreateTestContext(httptest.NewRecorder())
	want := []float32{1, 2}
	c.Set("float32-slice", want)
	assert.Equal(t, want, c.GetFloat32Slice("float32-slice"))
}
// TestContextGetFloat64Slice round-trips a []float64 through Set/GetFloat64Slice.
func TestContextGetFloat64Slice(t *testing.T) {
	c, _ := CreateTestContext(httptest.NewRecorder())
	want := []float64{1, 2}
	c.Set("float64-slice", want)
	assert.Equal(t, want, c.GetFloat64Slice("float64-slice"))
}
// TestContextGetStringSlice round-trips a []string through Set/GetStringSlice.
func TestContextGetStringSlice(t *testing.T) {
	c, _ := CreateTestContext(httptest.NewRecorder())
	want := []string{"foo"}
	c.Set("slice", want)
	assert.Equal(t, want, c.GetStringSlice("slice"))
}
// TestContextGetErrorSlice round-trips an []error through Set/GetErrorSlice.
func TestContextGetErrorSlice(t *testing.T) {
	c, _ := CreateTestContext(httptest.NewRecorder())
	want := []error{errors.New("error1"), errors.New("error2")}
	c.Set("error-slice", want)
	assert.Equal(t, want, c.GetErrorSlice("error-slice"))
}
// TestContextGetStringMap round-trips a map[string]any through
// Set/GetStringMap and checks element access on the returned map.
func TestContextGetStringMap(t *testing.T) {
	c, _ := CreateTestContext(httptest.NewRecorder())
	want := map[string]any{"foo": 1}
	c.Set("map", want)
	assert.Equal(t, want, c.GetStringMap("map"))
	assert.Equal(t, 1, c.GetStringMap("map")["foo"])
}
// TestContextGetStringMapString round-trips a map[string]string through
// Set/GetStringMapString and checks element access on the returned map.
func TestContextGetStringMapString(t *testing.T) {
	c, _ := CreateTestContext(httptest.NewRecorder())
	want := map[string]string{"foo": "bar"}
	c.Set("map", want)
	assert.Equal(t, want, c.GetStringMapString("map"))
	assert.Equal(t, "bar", c.GetStringMapString("map")["foo"])
}
// TestContextGetStringMapStringSlice round-trips a map[string][]string through
// Set/GetStringMapStringSlice and checks element access on the returned map.
func TestContextGetStringMapStringSlice(t *testing.T) {
	c, _ := CreateTestContext(httptest.NewRecorder())
	want := map[string][]string{"foo": {"foo"}}
	c.Set("map", want)
	assert.Equal(t, want, c.GetStringMapStringSlice("map"))
	assert.Equal(t, []string{"foo"}, c.GetStringMapStringSlice("map")["foo"])
}
// TestContextCopy verifies Copy produces a detached snapshot safe to use
// outside the request: handlers are dropped, the writer is neutralized, the
// index is pinned past the chain, and Keys are deep-copied so later writes on
// the copy do not leak back into the original.
func TestContextCopy(t *testing.T) {
	c, _ := CreateTestContext(httptest.NewRecorder())
	c.index = 2
	c.Request, _ = http.NewRequest(http.MethodPost, "/hola", nil)
	c.handlers = HandlersChain{func(c *Context) {}}
	c.Params = Params{Param{Key: "foo", Value: "bar"}}
	c.Set("foo", "bar")
	c.fullPath = "/hola"
	cp := c.Copy()
	// The copy must not be able to continue the middleware chain or write.
	assert.Nil(t, cp.handlers)
	assert.Nil(t, cp.writermem.ResponseWriter)
	assert.Equal(t, &cp.writermem, cp.Writer.(*responseWriter))
	assert.Equal(t, cp.Request, c.Request)
	// abortIndex keeps any accidental Next() on the copy a no-op.
	assert.Equal(t, abortIndex, cp.index)
	assert.Equal(t, cp.Keys, c.Keys)
	assert.Equal(t, cp.engine, c.engine)
	assert.Equal(t, cp.Params, c.Params)
	// Mutating the copy's Keys must not affect the original (deep copy).
	cp.Set("foo", "notBar")
	assert.NotEqual(t, cp.Keys["foo"], c.Keys["foo"])
	assert.Equal(t, cp.fullPath, c.fullPath)
}
// TestContextHandlerName checks HandlerName reports the fully-qualified name
// of the last handler in the chain (tolerating a vendored import path).
func TestContextHandlerName(t *testing.T) {
	c, _ := CreateTestContext(httptest.NewRecorder())
	c.handlers = HandlersChain{func(c *Context) {}, handlerNameTest}
	assert.Regexp(t, "^(.*/vendor/)?github.com/gin-gonic/gin.handlerNameTest$", c.HandlerName())
}
// TestContextHandlerNames checks HandlerNames lists every non-nil handler in
// the chain (nil entries are skipped, hence 4 of the 5 entries) and that each
// name matches the package-qualified naming pattern.
func TestContextHandlerNames(t *testing.T) {
	c, _ := CreateTestContext(httptest.NewRecorder())
	c.handlers = HandlersChain{func(c *Context) {}, nil, handlerNameTest, func(c *Context) {}, handlerNameTest2}
	names := c.HandlerNames()
	assert.Len(t, names, 4)
	for _, name := range names {
		assert.Regexp(t, `^(.*/vendor/)?(github\.com/gin-gonic/gin\.){1}(TestContextHandlerNames\.func.*){0,1}(handlerNameTest.*){0,1}`, name)
	}
}
// handlerNameTest is a no-op named handler used by the HandlerName and
// HandlerNames tests, which assert on its function name via reflection.
func handlerNameTest(c *Context) {
}
// handlerNameTest2 is a second no-op named handler used by TestContextHandlerNames.
func handlerNameTest2(c *Context) {
}
// handlerTest is a no-op HandlerFunc whose identity (function pointer) is
// compared by TestContextHandler.
var handlerTest HandlerFunc = func(c *Context) {
}
// TestContextHandler checks Handler() returns the main (last) handler of the
// chain, compared by function pointer identity.
func TestContextHandler(t *testing.T) {
	c, _ := CreateTestContext(httptest.NewRecorder())
	c.handlers = HandlersChain{func(c *Context) {}, handlerTest}
	want := reflect.ValueOf(handlerTest).Pointer()
	assert.Equal(t, want, reflect.ValueOf(c.Handler()).Pointer())
}
// TestContextQuery exercises GetQuery/Query/DefaultQuery for present, empty
// and missing parameters, and confirms query parsing does not bleed into the
// POST-form accessors.
func TestContextQuery(t *testing.T) {
	c, _ := CreateTestContext(httptest.NewRecorder())
	c.Request, _ = http.NewRequest(http.MethodGet, "http://example.com/?foo=bar&page=10&id=", nil)
	// Present parameter with a value.
	value, ok := c.GetQuery("foo")
	assert.True(t, ok)
	assert.Equal(t, "bar", value)
	assert.Equal(t, "bar", c.DefaultQuery("foo", "none"))
	assert.Equal(t, "bar", c.Query("foo"))
	value, ok = c.GetQuery("page")
	assert.True(t, ok)
	assert.Equal(t, "10", value)
	assert.Equal(t, "10", c.DefaultQuery("page", "0"))
	assert.Equal(t, "10", c.Query("page"))
	// Present but empty parameter: exists, and DefaultQuery keeps the empty value.
	value, ok = c.GetQuery("id")
	assert.True(t, ok)
	assert.Empty(t, value)
	assert.Empty(t, c.DefaultQuery("id", "nada"))
	assert.Empty(t, c.Query("id"))
	// Missing parameter: absent, and DefaultQuery falls back.
	value, ok = c.GetQuery("NoKey")
	assert.False(t, ok)
	assert.Empty(t, value)
	assert.Equal(t, "nada", c.DefaultQuery("NoKey", "nada"))
	assert.Empty(t, c.Query("NoKey"))
	// postform should not mess
	value, ok = c.GetPostForm("page")
	assert.False(t, ok)
	assert.Empty(t, value)
	assert.Empty(t, c.PostForm("foo"))
}
// TestContextInitQueryCache covers the lazy initialization of the internal
// query cache: a pre-populated cache is left alone, a nil Request or URL
// yields an empty cache, and otherwise the URL's query string is parsed once.
func TestContextInitQueryCache(t *testing.T) {
	validURL, err := url.Parse("https://github.com/gin-gonic/gin/pull/3969?key=value&otherkey=othervalue")
	require.NoError(t, err)
	tests := []struct {
		testName           string
		testContext        *Context
		expectedQueryCache url.Values
	}{
		{
			testName: "queryCache should remain unchanged if already not nil",
			testContext: &Context{
				queryCache: url.Values{"a": []string{"b"}},
				Request:    &http.Request{URL: validURL}, // valid request for evidence that values weren't extracted
			},
			expectedQueryCache: url.Values{"a": []string{"b"}},
		},
		{
			testName:           "queryCache should be empty when Request is nil",
			testContext:        &Context{Request: nil}, // explicit nil for readability
			expectedQueryCache: url.Values{},
		},
		{
			testName:           "queryCache should be empty when Request.URL is nil",
			testContext:        &Context{Request: &http.Request{URL: nil}}, // explicit nil for readability
			expectedQueryCache: url.Values{},
		},
		{
			testName:           "queryCache should be populated when it not yet populated and Request + Request.URL are non nil",
			testContext:        &Context{Request: &http.Request{URL: validURL}}, // explicit nil for readability
			expectedQueryCache: url.Values{"key": []string{"value"}, "otherkey": []string{"othervalue"}},
		},
	}
	for _, test := range tests {
		t.Run(test.testName, func(t *testing.T) {
			test.testContext.initQueryCache()
			assert.Equal(t, test.expectedQueryCache, test.testContext.queryCache)
		})
	}
}
// TestContextDefaultQueryOnEmptyRequest ensures the query helpers are
// nil-safe when Context.Request has never been assigned.
func TestContextDefaultQueryOnEmptyRequest(t *testing.T) {
	c, _ := CreateTestContext(httptest.NewRecorder()) // here c.Request == nil
	assert.NotPanics(t, func() {
		got, ok := c.GetQuery("NoKey")
		assert.False(t, ok)
		assert.Empty(t, got)
	})
	assert.NotPanics(t, func() {
		assert.Equal(t, "nada", c.DefaultQuery("NoKey", "nada"))
	})
	assert.NotPanics(t, func() {
		assert.Empty(t, c.Query("NoKey"))
	})
}
// TestContextQueryAndPostForm mixes URL query parameters and a urlencoded
// POST body and checks that the query accessors, the post-form accessors,
// form binding, the array accessors and the map accessors each read from the
// right source without cross-contamination.
func TestContextQueryAndPostForm(t *testing.T) {
	c, _ := CreateTestContext(httptest.NewRecorder())
	body := strings.NewReader("foo=bar&page=11&both=&foo=second")
	c.Request, _ = http.NewRequest(http.MethodPost,
		"/?both=GET&id=main&id=omit&array[]=first&array[]=second&ids[a]=hi&ids[b]=3.14", body)
	c.Request.Header.Add("Content-Type", MIMEPOSTForm)
	// "foo" exists only in the POST body.
	assert.Equal(t, "bar", c.DefaultPostForm("foo", "none"))
	assert.Equal(t, "bar", c.PostForm("foo"))
	assert.Empty(t, c.Query("foo"))
	value, ok := c.GetPostForm("page")
	assert.True(t, ok)
	assert.Equal(t, "11", value)
	assert.Equal(t, "11", c.DefaultPostForm("page", "0"))
	assert.Equal(t, "11", c.PostForm("page"))
	assert.Empty(t, c.Query("page"))
	// "both" is empty in the body but set to "GET" in the query string.
	value, ok = c.GetPostForm("both")
	assert.True(t, ok)
	assert.Empty(t, value)
	assert.Empty(t, c.PostForm("both"))
	assert.Empty(t, c.DefaultPostForm("both", "nothing"))
	assert.Equal(t, http.MethodGet, c.Query("both"), http.MethodGet)
	// "id" exists only in the query string; the first value wins.
	value, ok = c.GetQuery("id")
	assert.True(t, ok)
	assert.Equal(t, "main", value)
	assert.Equal(t, "000", c.DefaultPostForm("id", "000"))
	assert.Equal(t, "main", c.Query("id"))
	assert.Empty(t, c.PostForm("id"))
	// Missing keys fall back to defaults on both sides.
	value, ok = c.GetQuery("NoKey")
	assert.False(t, ok)
	assert.Empty(t, value)
	value, ok = c.GetPostForm("NoKey")
	assert.False(t, ok)
	assert.Empty(t, value)
	assert.Equal(t, "nada", c.DefaultPostForm("NoKey", "nada"))
	assert.Equal(t, "nothing", c.DefaultQuery("NoKey", "nothing"))
	assert.Empty(t, c.PostForm("NoKey"))
	assert.Empty(t, c.Query("NoKey"))
	// Bind merges query and body into the struct.
	var obj struct {
		Foo   string   `form:"foo"`
		ID    string   `form:"id"`
		Page  int      `form:"page"`
		Both  string   `form:"both"`
		Array []string `form:"array[]"`
	}
	require.NoError(t, c.Bind(&obj))
	assert.Equal(t, "bar", obj.Foo, "bar")
	assert.Equal(t, "main", obj.ID, "main")
	assert.Equal(t, 11, obj.Page, 11)
	assert.Empty(t, obj.Both)
	assert.Equal(t, []string{"first", "second"}, obj.Array)
	// Array accessors over repeated query keys.
	values, ok := c.GetQueryArray("array[]")
	assert.True(t, ok)
	assert.Equal(t, "first", values[0])
	assert.Equal(t, "second", values[1])
	values = c.QueryArray("array[]")
	assert.Equal(t, "first", values[0])
	assert.Equal(t, "second", values[1])
	values = c.QueryArray("nokey")
	assert.Empty(t, values)
	values = c.QueryArray("both")
	assert.Len(t, values, 1)
	assert.Equal(t, http.MethodGet, values[0])
	// Map accessors over "key[subkey]=value" style parameters.
	dicts, ok := c.GetQueryMap("ids")
	assert.True(t, ok)
	assert.Equal(t, "hi", dicts["a"])
	assert.Equal(t, "3.14", dicts["b"])
	dicts, ok = c.GetQueryMap("nokey")
	assert.False(t, ok)
	assert.Empty(t, dicts)
	dicts, ok = c.GetQueryMap("both")
	assert.False(t, ok)
	assert.Empty(t, dicts)
	dicts, ok = c.GetQueryMap("array")
	assert.False(t, ok)
	assert.Empty(t, dicts)
	dicts = c.QueryMap("ids")
	assert.Equal(t, "hi", dicts["a"])
	assert.Equal(t, "3.14", dicts["b"])
	dicts = c.QueryMap("nokey")
	assert.Empty(t, dicts)
}
// TestContextPostFormMultipart binds a multipart form (built by
// createMultipartRequest) into a struct — including the time_format /
// time_utc / time_location binding tags — then exercises the post-form
// accessors (scalar, array and map variants) against the same request.
func TestContextPostFormMultipart(t *testing.T) {
	c, _ := CreateTestContext(httptest.NewRecorder())
	c.Request = createMultipartRequest()
	var obj struct {
		Foo          string    `form:"foo"`
		Bar          string    `form:"bar"`
		BarAsInt     int       `form:"bar"`
		Array        []string  `form:"array"`
		ID           string    `form:"id"`
		TimeLocal    time.Time `form:"time_local" time_format:"02/01/2006 15:04"`
		TimeUTC      time.Time `form:"time_utc" time_format:"02/01/2006 15:04" time_utc:"1"`
		TimeLocation time.Time `form:"time_location" time_format:"02/01/2006 15:04" time_location:"Asia/Tokyo"`
		BlankTime    time.Time `form:"blank_time" time_format:"02/01/2006 15:04"`
	}
	require.NoError(t, c.Bind(&obj))
	assert.Equal(t, "bar", obj.Foo)
	assert.Equal(t, "10", obj.Bar)
	assert.Equal(t, 10, obj.BarAsInt)
	assert.Equal(t, []string{"first", "second"}, obj.Array)
	assert.Empty(t, obj.ID)
	// The same wall-clock string must come back in the requested location.
	assert.Equal(t, "31/12/2016 14:55", obj.TimeLocal.Format("02/01/2006 15:04"))
	assert.Equal(t, time.Local, obj.TimeLocal.Location())
	assert.Equal(t, "31/12/2016 14:55", obj.TimeUTC.Format("02/01/2006 15:04"))
	assert.Equal(t, time.UTC, obj.TimeUTC.Location())
	loc, _ := time.LoadLocation("Asia/Tokyo")
	assert.Equal(t, "31/12/2016 14:55", obj.TimeLocation.Format("02/01/2006 15:04"))
	assert.Equal(t, loc, obj.TimeLocation.Location())
	// An empty time field binds to the zero time.
	assert.True(t, obj.BlankTime.IsZero())
	// Multipart fields are form data, not query parameters.
	value, ok := c.GetQuery("foo")
	assert.False(t, ok)
	assert.Empty(t, value)
	assert.Empty(t, c.Query("bar"))
	assert.Equal(t, "nothing", c.DefaultQuery("id", "nothing"))
	value, ok = c.GetPostForm("foo")
	assert.True(t, ok)
	assert.Equal(t, "bar", value)
	assert.Equal(t, "bar", c.PostForm("foo"))
	value, ok = c.GetPostForm("array")
	assert.True(t, ok)
	assert.Equal(t, "first", value)
	assert.Equal(t, "first", c.PostForm("array"))
	assert.Equal(t, "10", c.DefaultPostForm("bar", "nothing"))
	// Present-but-empty field: exists, and the default is not applied.
	value, ok = c.GetPostForm("id")
	assert.True(t, ok)
	assert.Empty(t, value)
	assert.Empty(t, c.PostForm("id"))
	assert.Empty(t, c.DefaultPostForm("id", "nothing"))
	value, ok = c.GetPostForm("nokey")
	assert.False(t, ok)
	assert.Empty(t, value)
	assert.Equal(t, "nothing", c.DefaultPostForm("nokey", "nothing"))
	values, ok := c.GetPostFormArray("array")
	assert.True(t, ok)
	assert.Equal(t, "first", values[0])
	assert.Equal(t, "second", values[1])
	values = c.PostFormArray("array")
	assert.Equal(t, "first", values[0])
	assert.Equal(t, "second", values[1])
	values = c.PostFormArray("nokey")
	assert.Empty(t, values)
	values = c.PostFormArray("foo")
	assert.Len(t, values, 1)
	assert.Equal(t, "bar", values[0])
	dicts, ok := c.GetPostFormMap("names")
	assert.True(t, ok)
	assert.Equal(t, "thinkerou", dicts["a"])
	assert.Equal(t, "tianou", dicts["b"])
	dicts, ok = c.GetPostFormMap("nokey")
	assert.False(t, ok)
	assert.Empty(t, dicts)
	dicts = c.PostFormMap("names")
	assert.Equal(t, "thinkerou", dicts["a"])
	assert.Equal(t, "tianou", dicts["b"])
	dicts = c.PostFormMap("nokey")
	assert.Empty(t, dicts)
}
// TestContextSetCookie checks the complete Set-Cookie header produced by
// SetCookie when SameSite=Lax has been configured.
func TestContextSetCookie(t *testing.T) {
	c, _ := CreateTestContext(httptest.NewRecorder())
	c.SetSameSite(http.SameSiteLaxMode)
	c.SetCookie("user", "gin", 1, "/", "localhost", true, true)
	const want = "user=gin; Path=/; Domain=localhost; Max-Age=1; HttpOnly; Secure; SameSite=Lax"
	assert.Equal(t, want, c.Writer.Header().Get("Set-Cookie"))
}
// TestContextSetCookiePathEmpty checks that an empty cookie path defaults
// to "/" in the emitted Set-Cookie header.
func TestContextSetCookiePathEmpty(t *testing.T) {
	c, _ := CreateTestContext(httptest.NewRecorder())
	c.SetSameSite(http.SameSiteLaxMode)
	c.SetCookie("user", "gin", 1, "", "localhost", true, true)
	const want = "user=gin; Path=/; Domain=localhost; Max-Age=1; HttpOnly; Secure; SameSite=Lax"
	assert.Equal(t, want, c.Writer.Header().Get("Set-Cookie"))
}
// TestContextGetCookie reads an existing request cookie and confirms a
// missing cookie name yields an error.
func TestContextGetCookie(t *testing.T) {
	c, _ := CreateTestContext(httptest.NewRecorder())
	c.Request, _ = http.NewRequest(http.MethodGet, "/get", nil)
	c.Request.Header.Set("Cookie", "user=gin")
	val, _ := c.Cookie("user")
	assert.Equal(t, "gin", val)
	_, err := c.Cookie("nokey")
	require.Error(t, err)
}
// TestContextBodyAllowedForStatus covers the status codes for which HTTP
// forbids a response body (1xx, 204, 304) plus one code that allows a body.
func TestContextBodyAllowedForStatus(t *testing.T) {
	for _, code := range []int{http.StatusProcessing, http.StatusNoContent, http.StatusNotModified} {
		assert.False(t, bodyAllowedForStatus(code))
	}
	assert.True(t, bodyAllowedForStatus(http.StatusInternalServerError))
}
// TestRender is a render stub whose Render always fails with errTestRender;
// it is used to exercise Context.Render's error-collection path.
type TestRender struct{}
// Render always returns errTestRender without writing anything.
func (*TestRender) Render(http.ResponseWriter) error {
	return errTestRender
}
// WriteContentType deliberately writes no Content-Type header.
func (*TestRender) WriteContentType(http.ResponseWriter) {}
// TestContextRenderIfErr verifies a failing render is recorded on c.Errors
// as a private-type error.
func TestContextRenderIfErr(t *testing.T) {
	rec := httptest.NewRecorder()
	c, _ := CreateTestContext(rec)
	c.Render(http.StatusOK, &TestRender{})
	want := errorMsgs{&Error{Err: errTestRender, Type: 1}}
	assert.Equal(t, want, c.Errors)
}
// Tests that the response is serialized as JSON
// and Content-Type is set to application/json
// and special HTML characters are escaped
func TestContextRenderJSON(t *testing.T) {
	rec := httptest.NewRecorder()
	c, _ := CreateTestContext(rec)
	c.JSON(http.StatusCreated, H{"foo": "bar", "html": "<b>"})
	assert.Equal(t, http.StatusCreated, rec.Code)
	// "<" and ">" must be escaped as \u003c / \u003e.
	assert.JSONEq(t, `{"foo":"bar","html":"\u003cb\u003e"}`, rec.Body.String())
	assert.Equal(t, "application/json; charset=utf-8", rec.Header().Get("Content-Type"))
}
// Tests that the response is serialized as JSONP
// and Content-Type is set to application/javascript
func TestContextRenderJSONP(t *testing.T) {
	rec := httptest.NewRecorder()
	c, _ := CreateTestContext(rec)
	c.Request, _ = http.NewRequest(http.MethodGet, "http://example.com/?callback=x", nil)
	c.JSONP(http.StatusCreated, H{"foo": "bar"})
	assert.Equal(t, http.StatusCreated, rec.Code)
	// The payload is wrapped in the callback named by ?callback=.
	assert.Equal(t, `x({"foo":"bar"});`, rec.Body.String())
	assert.Equal(t, "application/javascript; charset=utf-8", rec.Header().Get("Content-Type"))
}
// Tests that the response is serialized as JSONP
// and Content-Type is set to application/json
func TestContextRenderJSONPWithoutCallback(t *testing.T) {
	rec := httptest.NewRecorder()
	c, _ := CreateTestContext(rec)
	c.Request, _ = http.NewRequest(http.MethodGet, "http://example.com", nil)
	c.JSONP(http.StatusCreated, H{"foo": "bar"})
	assert.Equal(t, http.StatusCreated, rec.Code)
	// No ?callback= parameter → plain JSON, no wrapper.
	assert.JSONEq(t, `{"foo":"bar"}`, rec.Body.String())
	assert.Equal(t, "application/json; charset=utf-8", rec.Header().Get("Content-Type"))
}
// Tests that no JSON is rendered if code is 204
func TestContextRenderNoContentJSON(t *testing.T) {
	rec := httptest.NewRecorder()
	c, _ := CreateTestContext(rec)
	c.JSON(http.StatusNoContent, H{"foo": "bar"})
	assert.Equal(t, http.StatusNoContent, rec.Code)
	// 204 keeps the content type but suppresses the body.
	assert.Empty(t, rec.Body.String())
	assert.Equal(t, "application/json; charset=utf-8", rec.Header().Get("Content-Type"))
}
// Tests that the response is serialized as JSON
// we change the content-type before
func TestContextRenderAPIJSON(t *testing.T) {
	rec := httptest.NewRecorder()
	c, _ := CreateTestContext(rec)
	// A Content-Type set beforehand must be preserved by JSON().
	c.Header("Content-Type", "application/vnd.api+json")
	c.JSON(http.StatusCreated, H{"foo": "bar"})
	assert.Equal(t, http.StatusCreated, rec.Code)
	assert.JSONEq(t, `{"foo":"bar"}`, rec.Body.String())
	assert.Equal(t, "application/vnd.api+json", rec.Header().Get("Content-Type"))
}
// Tests that no Custom JSON is rendered if code is 204
func TestContextRenderNoContentAPIJSON(t *testing.T) {
	rec := httptest.NewRecorder()
	c, _ := CreateTestContext(rec)
	c.Header("Content-Type", "application/vnd.api+json")
	c.JSON(http.StatusNoContent, H{"foo": "bar"})
	assert.Equal(t, http.StatusNoContent, rec.Code)
	assert.Empty(t, rec.Body.String())
	assert.Equal(t, "application/vnd.api+json", rec.Header().Get("Content-Type"))
}
// Tests that the response is serialized as JSON
// and Content-Type is set to application/json
func TestContextRenderIndentedJSON(t *testing.T) {
	rec := httptest.NewRecorder()
	c, _ := CreateTestContext(rec)
	c.IndentedJSON(http.StatusCreated, H{"foo": "bar", "bar": "foo", "nested": H{"foo": "bar"}})
	assert.Equal(t, http.StatusCreated, rec.Code)
	// Keys are sorted and the output is indented with four spaces.
	assert.JSONEq(t, "{\n    \"bar\": \"foo\",\n    \"foo\": \"bar\",\n    \"nested\": {\n        \"foo\": \"bar\"\n    }\n}", rec.Body.String())
	assert.Equal(t, "application/json; charset=utf-8", rec.Header().Get("Content-Type"))
}
// Tests that no Custom JSON is rendered if code is 204
func TestContextRenderNoContentIndentedJSON(t *testing.T) {
	rec := httptest.NewRecorder()
	c, _ := CreateTestContext(rec)
	c.IndentedJSON(http.StatusNoContent, H{"foo": "bar", "bar": "foo", "nested": H{"foo": "bar"}})
	assert.Equal(t, http.StatusNoContent, rec.Code)
	assert.Empty(t, rec.Body.String())
	assert.Equal(t, "application/json; charset=utf-8", rec.Header().Get("Content-Type"))
}
// TestContextClientIPWithMultipleHeaders verifies that when several
// X-Forwarded-For headers are present, ClientIP resolves the right-most
// address not belonging to a trusted proxy across all header values.
func TestContextClientIPWithMultipleHeaders(t *testing.T) {
	c, _ := CreateTestContext(httptest.NewRecorder())
	c.Request, _ = http.NewRequest(http.MethodGet, "/test", nil)
	// Multiple X-Forwarded-For headers
	c.Request.Header.Add("X-Forwarded-For", "1.2.3.4, "+localhostIP)
	c.Request.Header.Add("X-Forwarded-For", "5.6.7.8")
	c.Request.RemoteAddr = localhostIP + ":1234"
	c.engine.ForwardedByClientIP = true
	c.engine.RemoteIPHeaders = []string{"X-Forwarded-For"}
	_ = c.engine.SetTrustedProxies([]string{localhostIP})
	// Should return 5.6.7.8 (last non-trusted IP)
	assert.Equal(t, "5.6.7.8", c.ClientIP())
}
// TestContextClientIPWithSingleHeader verifies that a single X-Forwarded-For
// header with a trusted-proxy tail resolves to the originating client IP.
func TestContextClientIPWithSingleHeader(t *testing.T) {
	c, _ := CreateTestContext(httptest.NewRecorder())
	c.Request, _ = http.NewRequest(http.MethodGet, "/test", nil)
	c.Request.Header.Set("X-Forwarded-For", "1.2.3.4, "+localhostIP)
	c.Request.RemoteAddr = localhostIP + ":1234"
	c.engine.ForwardedByClientIP = true
	c.engine.RemoteIPHeaders = []string{"X-Forwarded-For"}
	_ = c.engine.SetTrustedProxies([]string{localhostIP})
	// Should return 1.2.3.4
	assert.Equal(t, "1.2.3.4", c.ClientIP())
}
// Tests that the response is serialized as Secure JSON
// and Content-Type is set to application/json
func TestContextRenderSecureJSON(t *testing.T) {
	rec := httptest.NewRecorder()
	c, engine := CreateTestContext(rec)
	// The configured prefix is prepended to array payloads to defeat
	// JSON-hijacking of top-level arrays.
	engine.SecureJsonPrefix("&&&START&&&")
	c.SecureJSON(http.StatusCreated, []string{"foo", "bar"})
	assert.Equal(t, http.StatusCreated, rec.Code)
	assert.Equal(t, `&&&START&&&["foo","bar"]`, rec.Body.String())
	assert.Equal(t, "application/json; charset=utf-8", rec.Header().Get("Content-Type"))
}
// Tests that no Custom JSON is rendered if code is 204
func TestContextRenderNoContentSecureJSON(t *testing.T) {
	rec := httptest.NewRecorder()
	c, _ := CreateTestContext(rec)
	c.SecureJSON(http.StatusNoContent, []string{"foo", "bar"})
	assert.Equal(t, http.StatusNoContent, rec.Code)
	assert.Empty(t, rec.Body.String())
	assert.Equal(t, "application/json; charset=utf-8", rec.Header().Get("Content-Type"))
}
// TestContextRenderNoContentAsciiJSON checks AsciiJSON suppresses the body
// for a 204 response while still setting the JSON content type.
func TestContextRenderNoContentAsciiJSON(t *testing.T) {
	rec := httptest.NewRecorder()
	c, _ := CreateTestContext(rec)
	c.AsciiJSON(http.StatusNoContent, []string{"lang", "Go语言"})
	assert.Equal(t, http.StatusNoContent, rec.Code)
	assert.Empty(t, rec.Body.String())
	assert.Equal(t, "application/json", rec.Header().Get("Content-Type"))
}
// Tests that the response is serialized as JSON
// and Content-Type is set to application/json
// and special HTML characters are preserved
func TestContextRenderPureJSON(t *testing.T) {
	rec := httptest.NewRecorder()
	c, _ := CreateTestContext(rec)
	c.PureJSON(http.StatusCreated, H{"foo": "bar", "html": "<b>"})
	assert.Equal(t, http.StatusCreated, rec.Code)
	// Unlike JSON(), "<b>" is emitted verbatim (no \u003c escaping).
	assert.JSONEq(t, "{\"foo\":\"bar\",\"html\":\"<b>\"}\n", rec.Body.String())
	assert.Equal(t, "application/json; charset=utf-8", rec.Header().Get("Content-Type"))
}
// Tests that the response executes the templates
// and responds with Content-Type set to text/html
func TestContextRenderHTML(t *testing.T) {
	rec := httptest.NewRecorder()
	c, engine := CreateTestContext(rec)
	tmpl := template.Must(template.New("t").Parse(`Hello {{.name}}`))
	engine.SetHTMLTemplate(tmpl)
	c.HTML(http.StatusCreated, "t", H{"name": "alexandernyquist"})
	assert.Equal(t, http.StatusCreated, rec.Code)
	assert.Equal(t, "Hello alexandernyquist", rec.Body.String())
	assert.Equal(t, "text/html; charset=utf-8", rec.Header().Get("Content-Type"))
}
// TestContextRenderHTML2 checks that calling SetHTMLTemplate in debug mode
// after a route has already been registered prints the thread-safety warning,
// and that the template still renders correctly afterwards.
func TestContextRenderHTML2(t *testing.T) {
	w := httptest.NewRecorder()
	c, router := CreateTestContext(w)
	// print debug warning log when Engine.trees > 0
	router.addRoute(http.MethodGet, "/", HandlersChain{func(_ *Context) {}})
	assert.Len(t, router.trees, 1)
	templ := template.Must(template.New("t").Parse(`Hello {{.name}}`))
	// Capture stdout while toggling DebugMode on just for the
	// SetHTMLTemplate call, so only its warning is recorded.
	re := captureOutput(t, func() {
		SetMode(DebugMode)
		router.SetHTMLTemplate(templ)
		SetMode(TestMode)
	})
	// The captured output must be the full warning text, verbatim.
	assert.Equal(t, "[GIN-debug] [WARNING] Since SetHTMLTemplate() is NOT thread-safe. It should only be called\nat initialization. ie. before any route is registered or the router is listening in a socket:\n\n\trouter := gin.Default()\n\trouter.SetHTMLTemplate(template) // << good place\n\n", re)
	c.HTML(http.StatusCreated, "t", H{"name": "alexandernyquist"})
	assert.Equal(t, http.StatusCreated, w.Code)
	assert.Equal(t, "Hello alexandernyquist", w.Body.String())
	assert.Equal(t, "text/html; charset=utf-8", w.Header().Get("Content-Type"))
}
// Tests that HTML writes no body when the status code is 204.
func TestContextRenderNoContentHTML(t *testing.T) {
	rec := httptest.NewRecorder()
	ctx, engine := CreateTestContext(rec)

	tmpl := template.Must(template.New("t").Parse(`Hello {{.name}}`))
	engine.SetHTMLTemplate(tmpl)
	ctx.HTML(http.StatusNoContent, "t", H{"name": "alexandernyquist"})

	assert.Equal(t, http.StatusNoContent, rec.Code)
	assert.Empty(t, rec.Body.String())
	assert.Equal(t, "text/html; charset=utf-8", rec.Header().Get("Content-Type"))
}
// TestContextRenderXML verifies XML serialization and the
// application/xml content type.
func TestContextRenderXML(t *testing.T) {
	rec := httptest.NewRecorder()
	ctx, _ := CreateTestContext(rec)

	ctx.XML(http.StatusCreated, H{"foo": "bar"})

	assert.Equal(t, http.StatusCreated, rec.Code)
	assert.Equal(t, "<map><foo>bar</foo></map>", rec.Body.String())
	assert.Equal(t, "application/xml; charset=utf-8", rec.Header().Get("Content-Type"))
}
// Tests that XML writes no body when the status code is 204.
func TestContextRenderNoContentXML(t *testing.T) {
	rec := httptest.NewRecorder()
	ctx, _ := CreateTestContext(rec)

	ctx.XML(http.StatusNoContent, H{"foo": "bar"})

	assert.Equal(t, http.StatusNoContent, rec.Code)
	assert.Empty(t, rec.Body.String())
	assert.Equal(t, "application/xml; charset=utf-8", rec.Header().Get("Content-Type"))
}
// TestContextRenderString verifies printf-style formatting and the
// text/plain content type.
func TestContextRenderString(t *testing.T) {
	rec := httptest.NewRecorder()
	ctx, _ := CreateTestContext(rec)

	ctx.String(http.StatusCreated, "test %s %d", "string", 2)

	assert.Equal(t, http.StatusCreated, rec.Code)
	assert.Equal(t, "test string 2", rec.Body.String())
	assert.Equal(t, "text/plain; charset=utf-8", rec.Header().Get("Content-Type"))
}
// Tests that String writes no body when the status code is 204.
func TestContextRenderNoContentString(t *testing.T) {
	rec := httptest.NewRecorder()
	ctx, _ := CreateTestContext(rec)

	ctx.String(http.StatusNoContent, "test %s %d", "string", 2)

	assert.Equal(t, http.StatusNoContent, rec.Code)
	assert.Empty(t, rec.Body.String())
	assert.Equal(t, "text/plain; charset=utf-8", rec.Header().Get("Content-Type"))
}
// TestContextRenderHTMLString verifies that a Content-Type header set
// before calling String is honored (text/html instead of text/plain).
func TestContextRenderHTMLString(t *testing.T) {
	rec := httptest.NewRecorder()
	ctx, _ := CreateTestContext(rec)

	ctx.Header("Content-Type", "text/html; charset=utf-8")
	ctx.String(http.StatusCreated, "<html>%s %d</html>", "string", 3)

	assert.Equal(t, http.StatusCreated, rec.Code)
	assert.Equal(t, "<html>string 3</html>", rec.Body.String())
	assert.Equal(t, "text/html; charset=utf-8", rec.Header().Get("Content-Type"))
}
// Tests that String writes no body for 204 even with a pre-set
// text/html Content-Type.
func TestContextRenderNoContentHTMLString(t *testing.T) {
	rec := httptest.NewRecorder()
	ctx, _ := CreateTestContext(rec)

	ctx.Header("Content-Type", "text/html; charset=utf-8")
	ctx.String(http.StatusNoContent, "<html>%s %d</html>", "string", 3)

	assert.Equal(t, http.StatusNoContent, rec.Code)
	assert.Empty(t, rec.Body.String())
	assert.Equal(t, "text/html; charset=utf-8", rec.Header().Get("Content-Type"))
}
// TestContextRenderData verifies that raw bytes are written with the
// caller-supplied MIME type.
func TestContextRenderData(t *testing.T) {
	rec := httptest.NewRecorder()
	ctx, _ := CreateTestContext(rec)

	ctx.Data(http.StatusCreated, "text/csv", []byte(`foo,bar`))

	assert.Equal(t, http.StatusCreated, rec.Code)
	assert.Equal(t, "foo,bar", rec.Body.String())
	assert.Equal(t, "text/csv", rec.Header().Get("Content-Type"))
}
// Tests that Data writes no body when the status code is 204.
func TestContextRenderNoContentData(t *testing.T) {
	rec := httptest.NewRecorder()
	ctx, _ := CreateTestContext(rec)

	ctx.Data(http.StatusNoContent, "text/csv", []byte(`foo,bar`))

	assert.Equal(t, http.StatusNoContent, rec.Code)
	assert.Empty(t, rec.Body.String())
	assert.Equal(t, "text/csv", rec.Header().Get("Content-Type"))
}
// TestContextRenderSSE verifies that SSEvent and a raw sse.Event render as
// a well-formed server-sent-event stream.
func TestContextRenderSSE(t *testing.T) {
	w := httptest.NewRecorder()
	c, _ := CreateTestContext(w)
	c.SSEvent("float", 1.5)
	c.Render(-1, sse.Event{
		Id:   "123",
		Data: "text",
	})
	c.SSEvent("chat", H{
		"foo": "bar",
		"bar": "foo",
	})
	// Fix: expected value goes first in assert.Equal (testify convention,
	// matching the rest of this file). Spaces are stripped from both sides so
	// the comparison is insensitive to renderer whitespace.
	expected := strings.ReplaceAll("event:float\ndata:1.5\n\nid:123\ndata:text\n\nevent:chat\ndata:{\"bar\":\"foo\",\"foo\":\"bar\"}\n\n", " ", "")
	assert.Equal(t, expected, strings.ReplaceAll(w.Body.String(), " ", ""))
}
// TestContextRenderFile verifies that File serves an on-disk file with a
// 200 status and some detected Content-Type.
func TestContextRenderFile(t *testing.T) {
	rec := httptest.NewRecorder()
	ctx, _ := CreateTestContext(rec)
	ctx.Request, _ = http.NewRequest(http.MethodGet, "/", nil)

	ctx.File("./gin.go")

	assert.Equal(t, http.StatusOK, rec.Code)
	assert.Contains(t, rec.Body.String(), "func New(opts ...OptionFunc) *Engine {")
	// Content-Type='text/plain; charset=utf-8' when go version <= 1.16,
	// else, Content-Type='text/x-go; charset=utf-8'
	assert.NotEmpty(t, rec.Header().Get("Content-Type"))
}
// TestContextRenderFileFromFS verifies that FileFromFS serves a file from a
// filesystem and restores the original request URL path afterwards.
func TestContextRenderFileFromFS(t *testing.T) {
	rec := httptest.NewRecorder()
	ctx, _ := CreateTestContext(rec)
	ctx.Request, _ = http.NewRequest(http.MethodGet, "/some/path", nil)

	ctx.FileFromFS("./gin.go", Dir(".", false))

	assert.Equal(t, http.StatusOK, rec.Code)
	assert.Contains(t, rec.Body.String(), "func New(opts ...OptionFunc) *Engine {")
	// Content-Type='text/plain; charset=utf-8' when go version <= 1.16,
	// else, Content-Type='text/x-go; charset=utf-8'
	assert.NotEmpty(t, rec.Header().Get("Content-Type"))
	assert.Equal(t, "/some/path", ctx.Request.URL.Path)
}
// TestContextRenderAttachment verifies that FileAttachment serves the file
// and sets the Content-Disposition header with the replacement filename.
func TestContextRenderAttachment(t *testing.T) {
	w := httptest.NewRecorder()
	c, _ := CreateTestContext(w)
	newFilename := "new_filename.go"
	c.Request, _ = http.NewRequest(http.MethodGet, "/", nil)
	c.FileAttachment("./gin.go", newFilename)

	// Fix: use http.StatusOK instead of a bare 200, consistent with the
	// status-code constants used throughout this file.
	assert.Equal(t, http.StatusOK, w.Code)
	assert.Contains(t, w.Body.String(), "func New(opts ...OptionFunc) *Engine {")
	assert.Equal(t, fmt.Sprintf("attachment; filename=\"%s\"", newFilename), w.Header().Get("Content-Disposition"))
}
// TestContextRenderAndEscapeAttachment verifies that quotes and backslashes
// in the attachment filename are escaped in the Content-Disposition header,
// preventing header-field tampering.
func TestContextRenderAndEscapeAttachment(t *testing.T) {
	w := httptest.NewRecorder()
	c, _ := CreateTestContext(w)
	maliciousFilename := "tampering_field.sh\"; \\\"; dummy=.go"
	actualEscapedResponseFilename := "tampering_field.sh\\\"; \\\\\\\"; dummy=.go"
	c.Request, _ = http.NewRequest(http.MethodGet, "/", nil)
	c.FileAttachment("./gin.go", maliciousFilename)

	// Fix: use http.StatusOK instead of a bare 200, consistent with the
	// status-code constants used throughout this file.
	assert.Equal(t, http.StatusOK, w.Code)
	assert.Contains(t, w.Body.String(), "func New(opts ...OptionFunc) *Engine {")
	assert.Equal(t, fmt.Sprintf("attachment; filename=\"%s\"", actualEscapedResponseFilename), w.Header().Get("Content-Disposition"))
}
// TestContextRenderUTF8Attachment verifies that non-ASCII attachment
// filenames are emitted using the RFC 5987 `filename*=UTF-8''` form.
func TestContextRenderUTF8Attachment(t *testing.T) {
	w := httptest.NewRecorder()
	c, _ := CreateTestContext(w)
	newFilename := "new🧡_filename.go"
	c.Request, _ = http.NewRequest(http.MethodGet, "/", nil)
	c.FileAttachment("./gin.go", newFilename)

	// Fix: use http.StatusOK instead of a bare 200, consistent with the
	// status-code constants used throughout this file.
	assert.Equal(t, http.StatusOK, w.Code)
	assert.Contains(t, w.Body.String(), "func New(opts ...OptionFunc) *Engine {")
	assert.Equal(t, `attachment; filename*=UTF-8''`+url.QueryEscape(newFilename), w.Header().Get("Content-Disposition"))
}
// TestContextRenderYAML verifies YAML serialization and the
// application/yaml content type.
func TestContextRenderYAML(t *testing.T) {
	rec := httptest.NewRecorder()
	ctx, _ := CreateTestContext(rec)

	ctx.YAML(http.StatusCreated, H{"foo": "bar"})

	assert.Equal(t, http.StatusCreated, rec.Code)
	assert.Equal(t, "foo: bar\n", rec.Body.String())
	assert.Equal(t, "application/yaml; charset=utf-8", rec.Header().Get("Content-Type"))
}
// TestContextRenderTOML verifies TOML serialization and the
// application/toml content type.
func TestContextRenderTOML(t *testing.T) {
	rec := httptest.NewRecorder()
	ctx, _ := CreateTestContext(rec)

	ctx.TOML(http.StatusCreated, H{"foo": "bar"})

	assert.Equal(t, http.StatusCreated, rec.Code)
	assert.Equal(t, "foo = 'bar'\n", rec.Body.String())
	assert.Equal(t, "application/toml; charset=utf-8", rec.Header().Get("Content-Type"))
}
// TestContextRenderProtoBuf verifies that ProtoBuf writes the payload in
// protobuf wire format with Content-Type application/x-protobuf, using the
// example message from testdata.
func TestContextRenderProtoBuf(t *testing.T) {
	rec := httptest.NewRecorder()
	ctx, _ := CreateTestContext(rec)

	label := "test"
	payload := &testdata.Test{
		Label: &label,
		Reps:  []int64{1, 2},
	}
	ctx.ProtoBuf(http.StatusCreated, payload)

	// Marshal the same message independently to obtain the expected bytes.
	expected, err := proto.Marshal(payload)
	require.NoError(t, err)
	assert.Equal(t, http.StatusCreated, rec.Code)
	assert.Equal(t, string(expected), rec.Body.String())
	assert.Equal(t, "application/x-protobuf", rec.Header().Get("Content-Type"))
}
// TestContextHeaders verifies that Header sets response headers, overwrites
// existing values, and removes a header entirely when given an empty value.
func TestContextHeaders(t *testing.T) {
	ctx, _ := CreateTestContext(httptest.NewRecorder())

	ctx.Header("Content-Type", "text/plain")
	ctx.Header("X-Custom", "value")
	assert.Equal(t, "text/plain", ctx.Writer.Header().Get("Content-Type"))
	assert.Equal(t, "value", ctx.Writer.Header().Get("X-Custom"))

	// Overwrite one header; an empty value deletes the other outright.
	ctx.Header("Content-Type", "text/html")
	ctx.Header("X-Custom", "")
	assert.Equal(t, "text/html", ctx.Writer.Header().Get("Content-Type"))
	_, present := ctx.Writer.Header()["X-Custom"]
	assert.False(t, present)
}
// TODO
// TestContextRenderRedirectWithRelativePath verifies that Redirect panics on
// non-redirect status codes and sets Location for a valid 301.
func TestContextRenderRedirectWithRelativePath(t *testing.T) {
	rec := httptest.NewRecorder()
	ctx, _ := CreateTestContext(rec)
	ctx.Request, _ = http.NewRequest(http.MethodPost, "http://example.com", nil)

	// Codes outside the redirect ranges must panic.
	assert.Panics(t, func() { ctx.Redirect(299, "/new_path") })
	assert.Panics(t, func() { ctx.Redirect(309, "/new_path") })

	ctx.Redirect(http.StatusMovedPermanently, "/path")
	ctx.Writer.WriteHeaderNow()
	assert.Equal(t, http.StatusMovedPermanently, rec.Code)
	assert.Equal(t, "/path", rec.Header().Get("Location"))
}
// TestContextRenderRedirectWithAbsolutePath verifies a 302 redirect to an
// absolute URL.
func TestContextRenderRedirectWithAbsolutePath(t *testing.T) {
	rec := httptest.NewRecorder()
	ctx, _ := CreateTestContext(rec)
	ctx.Request, _ = http.NewRequest(http.MethodPost, "http://example.com", nil)

	ctx.Redirect(http.StatusFound, "http://google.com")
	ctx.Writer.WriteHeaderNow()

	assert.Equal(t, http.StatusFound, rec.Code)
	assert.Equal(t, "http://google.com", rec.Header().Get("Location"))
}
// TestContextRenderRedirectWith201 verifies that 201 Created is accepted by
// Redirect and carries a Location header.
func TestContextRenderRedirectWith201(t *testing.T) {
	rec := httptest.NewRecorder()
	ctx, _ := CreateTestContext(rec)
	ctx.Request, _ = http.NewRequest(http.MethodPost, "http://example.com", nil)

	ctx.Redirect(http.StatusCreated, "/resource")
	ctx.Writer.WriteHeaderNow()

	assert.Equal(t, http.StatusCreated, rec.Code)
	assert.Equal(t, "/resource", rec.Header().Get("Location"))
}
// TestContextRenderRedirectAll exercises the status-code validation in
// Redirect: non-redirect codes panic, 3xx codes do not.
func TestContextRenderRedirectAll(t *testing.T) {
	ctx, _ := CreateTestContext(httptest.NewRecorder())
	ctx.Request, _ = http.NewRequest(http.MethodPost, "http://example.com", nil)

	assert.Panics(t, func() { ctx.Redirect(http.StatusOK, "/resource") })
	assert.Panics(t, func() { ctx.Redirect(http.StatusAccepted, "/resource") })
	assert.Panics(t, func() { ctx.Redirect(299, "/resource") })
	assert.Panics(t, func() { ctx.Redirect(309, "/resource") })
	assert.NotPanics(t, func() { ctx.Redirect(http.StatusMultipleChoices, "/resource") })
	assert.NotPanics(t, func() { ctx.Redirect(http.StatusPermanentRedirect, "/resource") })
}
// TestContextNegotiationWithJSON verifies that with no Accept header the
// first offered format (JSON) is chosen.
func TestContextNegotiationWithJSON(t *testing.T) {
	rec := httptest.NewRecorder()
	ctx, _ := CreateTestContext(rec)
	ctx.Request, _ = http.NewRequest(http.MethodPost, "", nil)

	ctx.Negotiate(http.StatusOK, Negotiate{
		Offered: []string{MIMEJSON, MIMEXML, MIMEYAML, MIMEYAML2},
		Data:    H{"foo": "bar"},
	})

	assert.Equal(t, http.StatusOK, rec.Code)
	assert.JSONEq(t, `{"foo":"bar"}`, rec.Body.String())
	assert.Equal(t, "application/json; charset=utf-8", rec.Header().Get("Content-Type"))
}
// TestContextNegotiationWithXML verifies that XML is chosen when it is the
// first offered format.
func TestContextNegotiationWithXML(t *testing.T) {
	rec := httptest.NewRecorder()
	ctx, _ := CreateTestContext(rec)
	ctx.Request, _ = http.NewRequest(http.MethodPost, "", nil)

	ctx.Negotiate(http.StatusOK, Negotiate{
		Offered: []string{MIMEXML, MIMEJSON, MIMEYAML, MIMEYAML2},
		Data:    H{"foo": "bar"},
	})

	assert.Equal(t, http.StatusOK, rec.Code)
	assert.Equal(t, "<map><foo>bar</foo></map>", rec.Body.String())
	assert.Equal(t, "application/xml; charset=utf-8", rec.Header().Get("Content-Type"))
}
// TestContextNegotiationWithYAML verifies that YAML is chosen when it is the
// first offered format.
func TestContextNegotiationWithYAML(t *testing.T) {
	rec := httptest.NewRecorder()
	ctx, _ := CreateTestContext(rec)
	ctx.Request, _ = http.NewRequest(http.MethodPost, "", nil)

	ctx.Negotiate(http.StatusOK, Negotiate{
		Offered: []string{MIMEYAML, MIMEXML, MIMEJSON, MIMETOML, MIMEYAML2},
		Data:    H{"foo": "bar"},
	})

	assert.Equal(t, http.StatusOK, rec.Code)
	assert.Equal(t, "foo: bar\n", rec.Body.String())
	assert.Equal(t, "application/yaml; charset=utf-8", rec.Header().Get("Content-Type"))
}
// TestContextNegotiationWithTOML verifies that TOML is chosen when it is the
// first offered format.
func TestContextNegotiationWithTOML(t *testing.T) {
	rec := httptest.NewRecorder()
	ctx, _ := CreateTestContext(rec)
	ctx.Request, _ = http.NewRequest(http.MethodPost, "", nil)

	ctx.Negotiate(http.StatusOK, Negotiate{
		Offered: []string{MIMETOML, MIMEXML, MIMEJSON, MIMEYAML, MIMEYAML2},
		Data:    H{"foo": "bar"},
	})

	assert.Equal(t, http.StatusOK, rec.Code)
	assert.Equal(t, "foo = 'bar'\n", rec.Body.String())
	assert.Equal(t, "application/toml; charset=utf-8", rec.Header().Get("Content-Type"))
}
// TestContextNegotiationWithHTML verifies that negotiating HTML renders the
// named template registered on the engine.
func TestContextNegotiationWithHTML(t *testing.T) {
	rec := httptest.NewRecorder()
	ctx, engine := CreateTestContext(rec)
	ctx.Request, _ = http.NewRequest(http.MethodPost, "", nil)

	tmpl := template.Must(template.New("t").Parse(`Hello {{.name}}`))
	engine.SetHTMLTemplate(tmpl)
	ctx.Negotiate(http.StatusOK, Negotiate{
		Offered:  []string{MIMEHTML},
		Data:     H{"name": "gin"},
		HTMLName: "t",
	})

	assert.Equal(t, http.StatusOK, rec.Code)
	assert.Equal(t, "Hello gin", rec.Body.String())
	assert.Equal(t, "text/html; charset=utf-8", rec.Header().Get("Content-Type"))
}
// TestContextNegotiationWithPROTOBUF verifies that protobuf is chosen when
// offered first and that the body matches an independent proto.Marshal.
func TestContextNegotiationWithPROTOBUF(t *testing.T) {
	rec := httptest.NewRecorder()
	ctx, _ := CreateTestContext(rec)
	ctx.Request = httptest.NewRequest(http.MethodPost, "/", nil)

	label := "test"
	payload := &testdata.Test{
		Label: &label,
		Reps:  []int64{1, 2},
	}
	ctx.Negotiate(http.StatusCreated, Negotiate{
		Offered: []string{MIMEPROTOBUF, MIMEJSON, MIMEXML},
		Data:    payload,
	})

	// Marshal original data for comparison
	expected, err := proto.Marshal(payload)
	require.NoError(t, err)
	assert.Equal(t, http.StatusCreated, rec.Code)
	assert.Equal(t, string(expected), rec.Body.String())
	assert.Equal(t, "application/x-protobuf", rec.Header().Get("Content-Type"))
}
// TestContextNegotiationWithBSON verifies that BSON is chosen when offered
// first and that the body matches an independent bson.Marshal.
func TestContextNegotiationWithBSON(t *testing.T) {
	rec := httptest.NewRecorder()
	ctx, _ := CreateTestContext(rec)
	ctx.Request, _ = http.NewRequest(http.MethodPost, "", nil)

	ctx.Negotiate(http.StatusOK, Negotiate{
		Offered: []string{MIMEBSON, MIMEXML, MIMEJSON, MIMEYAML, MIMEYAML2},
		Data:    H{"foo": "bar"},
	})

	expected, _ := bson.Marshal(H{"foo": "bar"})
	assert.Equal(t, http.StatusOK, rec.Code)
	assert.Equal(t, string(expected), rec.Body.String())
	assert.Equal(t, "application/bson", rec.Header().Get("Content-Type"))
}
// TestContextNegotiationNotSupport verifies that an unrenderable offered
// format aborts the context with 406 Not Acceptable.
func TestContextNegotiationNotSupport(t *testing.T) {
	rec := httptest.NewRecorder()
	ctx, _ := CreateTestContext(rec)
	ctx.Request, _ = http.NewRequest(http.MethodPost, "", nil)

	ctx.Negotiate(http.StatusOK, Negotiate{
		Offered: []string{MIMEPOSTForm},
	})

	assert.Equal(t, http.StatusNotAcceptable, rec.Code)
	assert.Equal(t, abortIndex, ctx.index)
	assert.True(t, ctx.IsAborted())
}
// TestContextNegotiationFormat verifies that NegotiateFormat panics with no
// offered formats and otherwise returns the first one when no Accept header
// is present.
func TestContextNegotiationFormat(t *testing.T) {
	ctx, _ := CreateTestContext(httptest.NewRecorder())
	ctx.Request, _ = http.NewRequest(http.MethodPost, "", nil)

	assert.Panics(t, func() { ctx.NegotiateFormat() })
	assert.Equal(t, MIMEJSON, ctx.NegotiateFormat(MIMEJSON, MIMEXML)) //nolint:testifylint
	assert.Equal(t, MIMEHTML, ctx.NegotiateFormat(MIMEHTML, MIMEJSON))
}
// TestContextNegotiationFormatWithAccept verifies that the Accept header
// drives format selection and yields "" when nothing offered matches.
func TestContextNegotiationFormatWithAccept(t *testing.T) {
	ctx, _ := CreateTestContext(httptest.NewRecorder())
	ctx.Request, _ = http.NewRequest(http.MethodPost, "/", nil)
	ctx.Request.Header.Add("Accept", "text/html,application/xhtml+xml,application/xml;q=0.9;q=0.8")

	assert.Equal(t, MIMEXML, ctx.NegotiateFormat(MIMEJSON, MIMEXML))
	assert.Equal(t, MIMEHTML, ctx.NegotiateFormat(MIMEXML, MIMEHTML))
	assert.Empty(t, ctx.NegotiateFormat(MIMEJSON))
}
// TestContextNegotiationFormatWithWildcardAccept covers wildcard Accept
// headers: "*/*" matches any offered format, while "text/*" matches only
// text types.
func TestContextNegotiationFormatWithWildcardAccept(t *testing.T) {
	ctx, _ := CreateTestContext(httptest.NewRecorder())
	ctx.Request, _ = http.NewRequest(http.MethodPost, "/", nil)
	ctx.Request.Header.Add("Accept", "*/*")

	assert.Equal(t, "*/*", ctx.NegotiateFormat("*/*"))
	assert.Equal(t, "text/*", ctx.NegotiateFormat("text/*"))
	assert.Equal(t, "application/*", ctx.NegotiateFormat("application/*"))
	assert.Equal(t, MIMEJSON, ctx.NegotiateFormat(MIMEJSON)) //nolint:testifylint
	assert.Equal(t, MIMEXML, ctx.NegotiateFormat(MIMEXML))
	assert.Equal(t, MIMEHTML, ctx.NegotiateFormat(MIMEHTML))

	// Fresh context: a "text/*" Accept header rejects non-text formats.
	ctx, _ = CreateTestContext(httptest.NewRecorder())
	ctx.Request, _ = http.NewRequest(http.MethodPost, "/", nil)
	ctx.Request.Header.Add("Accept", "text/*")

	assert.Equal(t, "*/*", ctx.NegotiateFormat("*/*"))
	assert.Equal(t, "text/*", ctx.NegotiateFormat("text/*"))
	assert.Empty(t, ctx.NegotiateFormat("application/*"))
	assert.Empty(t, ctx.NegotiateFormat(MIMEJSON))
	assert.Empty(t, ctx.NegotiateFormat(MIMEXML))
	assert.Equal(t, MIMEHTML, ctx.NegotiateFormat(MIMEHTML))
}
// TestContextNegotiationFormatCustom verifies that a manually populated
// Accepted list overrides parsing of the Accept header.
func TestContextNegotiationFormatCustom(t *testing.T) {
	ctx, _ := CreateTestContext(httptest.NewRecorder())
	ctx.Request, _ = http.NewRequest(http.MethodPost, "/", nil)
	ctx.Request.Header.Add("Accept", "text/html,application/xhtml+xml,application/xml;q=0.9;q=0.8")

	// Replace the parsed Accept list with an explicit one.
	ctx.Accepted = nil
	ctx.SetAccepted(MIMEJSON, MIMEXML)

	assert.Equal(t, MIMEJSON, ctx.NegotiateFormat(MIMEJSON, MIMEXML)) //nolint:testifylint
	assert.Equal(t, MIMEXML, ctx.NegotiateFormat(MIMEXML, MIMEHTML))
	assert.Equal(t, MIMEJSON, ctx.NegotiateFormat(MIMEJSON)) //nolint:testifylint
}
// TestContextNegotiationFormat2 verifies that similar-but-different MIME
// types (image/tiff-fx vs image/tiff) do not match each other.
func TestContextNegotiationFormat2(t *testing.T) {
	ctx, _ := CreateTestContext(httptest.NewRecorder())
	ctx.Request, _ = http.NewRequest(http.MethodPost, "/", nil)
	ctx.Request.Header.Add("Accept", "image/tiff-fx")

	assert.Empty(t, ctx.NegotiateFormat("image/tiff"))
}
// TestContextIsAborted verifies that Abort flips the aborted state and that
// neither Next nor manual index bumps can un-abort a context.
func TestContextIsAborted(t *testing.T) {
	ctx, _ := CreateTestContext(httptest.NewRecorder())
	assert.False(t, ctx.IsAborted())

	ctx.Abort()
	assert.True(t, ctx.IsAborted())

	ctx.Next()
	assert.True(t, ctx.IsAborted())

	ctx.index++
	assert.True(t, ctx.IsAborted())
}
// TestContextAbortWithStatus verifies that AbortWithStatus writes the status
// code, jumps the handler index to abortIndex, and marks the context aborted.
func TestContextAbortWithStatus(t *testing.T) {
	rec := httptest.NewRecorder()
	ctx, _ := CreateTestContext(rec)
	ctx.index = 4

	ctx.AbortWithStatus(http.StatusUnauthorized)

	assert.Equal(t, abortIndex, ctx.index)
	assert.Equal(t, http.StatusUnauthorized, ctx.Writer.Status())
	assert.Equal(t, http.StatusUnauthorized, rec.Code)
	assert.True(t, ctx.IsAborted())
}
// testJSONAbortMsg is the payload used by the AbortWithStatusJSON and
// AbortWithStatusPureJSON tests to check JSON serialization of the abort body.
type testJSONAbortMsg struct {
	Foo string `json:"foo"`
	Bar string `json:"bar"`
}
// TestContextAbortWithStatusJSON verifies that AbortWithStatusJSON aborts
// the context, writes the status code, and serializes the body as JSON.
func TestContextAbortWithStatusJSON(t *testing.T) {
	rec := httptest.NewRecorder()
	ctx, _ := CreateTestContext(rec)
	ctx.index = 4

	msg := &testJSONAbortMsg{Foo: "fooValue", Bar: "barValue"}
	ctx.AbortWithStatusJSON(http.StatusUnsupportedMediaType, msg)

	assert.Equal(t, abortIndex, ctx.index)
	assert.Equal(t, http.StatusUnsupportedMediaType, ctx.Writer.Status())
	assert.Equal(t, http.StatusUnsupportedMediaType, rec.Code)
	assert.True(t, ctx.IsAborted())
	assert.Equal(t, "application/json; charset=utf-8", rec.Header().Get("Content-Type"))

	body := new(bytes.Buffer)
	_, err := body.ReadFrom(rec.Body)
	require.NoError(t, err)
	assert.JSONEq(t, "{\"foo\":\"fooValue\",\"bar\":\"barValue\"}", body.String())
}
// TestContextAbortWithStatusPureJSON verifies that AbortWithStatusPureJSON
// aborts the context, writes the status code, and serializes the body with
// the unescaped JSON renderer.
func TestContextAbortWithStatusPureJSON(t *testing.T) {
	rec := httptest.NewRecorder()
	ctx, _ := CreateTestContext(rec)
	ctx.index = 4

	msg := &testJSONAbortMsg{Foo: "fooValue", Bar: "barValue"}
	ctx.AbortWithStatusPureJSON(http.StatusUnsupportedMediaType, msg)

	assert.Equal(t, abortIndex, ctx.index)
	assert.Equal(t, http.StatusUnsupportedMediaType, ctx.Writer.Status())
	assert.Equal(t, http.StatusUnsupportedMediaType, rec.Code)
	assert.True(t, ctx.IsAborted())
	assert.Equal(t, "application/json; charset=utf-8", rec.Header().Get("Content-Type"))

	body := new(bytes.Buffer)
	_, err := body.ReadFrom(rec.Body)
	require.NoError(t, err)
	assert.JSONEq(t, "{\"foo\":\"fooValue\",\"bar\":\"barValue\"}", body.String())
}
// TestContextError verifies Error's accumulation behavior: plain errors get
// ErrorTypePrivate and nil Meta, a pre-built *Error keeps its fields, Last()
// returns the most recent entry, and Error(nil) panics.
func TestContextError(t *testing.T) {
	c, _ := CreateTestContext(httptest.NewRecorder())
	assert.Empty(t, c.Errors)
	firstErr := errors.New("first error")
	c.Error(firstErr) //nolint: errcheck
	assert.Len(t, c.Errors, 1)
	assert.Equal(t, "Error #01: first error\n", c.Errors.String())
	secondErr := errors.New("second error")
	// Attaching a *Error directly preserves its Meta and Type.
	c.Error(&Error{ //nolint: errcheck
		Err:  secondErr,
		Meta: "some data 2",
		Type: ErrorTypePublic,
	})
	assert.Len(t, c.Errors, 2)
	assert.Equal(t, firstErr, c.Errors[0].Err)
	assert.Nil(t, c.Errors[0].Meta)
	assert.Equal(t, ErrorTypePrivate, c.Errors[0].Type)
	assert.Equal(t, secondErr, c.Errors[1].Err)
	assert.Equal(t, "some data 2", c.Errors[1].Meta)
	assert.Equal(t, ErrorTypePublic, c.Errors[1].Type)
	assert.Equal(t, c.Errors.Last(), c.Errors[1])
	// The final Error(nil) call must panic; the deferred recover turns a
	// missing panic into a test failure.
	defer func() {
		if recover() == nil {
			t.Error("didn't panic")
		}
	}()
	c.Error(nil) //nolint: errcheck
}
// TestContextTypedError verifies that ByType filters the error list by type
// and that Errors() returns every message in insertion order.
func TestContextTypedError(t *testing.T) {
	ctx, _ := CreateTestContext(httptest.NewRecorder())
	ctx.Error(errors.New("externo 0")).SetType(ErrorTypePublic)  //nolint: errcheck
	ctx.Error(errors.New("interno 0")).SetType(ErrorTypePrivate) //nolint: errcheck

	for _, e := range ctx.Errors.ByType(ErrorTypePublic) {
		assert.Equal(t, ErrorTypePublic, e.Type)
	}
	for _, e := range ctx.Errors.ByType(ErrorTypePrivate) {
		assert.Equal(t, ErrorTypePrivate, e.Type)
	}
	assert.Equal(t, []string{"externo 0", "interno 0"}, ctx.Errors.Errors())
}
// TestContextAbortWithError verifies that AbortWithError both aborts with
// the given status and records the error (here with attached meta).
func TestContextAbortWithError(t *testing.T) {
	rec := httptest.NewRecorder()
	ctx, _ := CreateTestContext(rec)

	ctx.AbortWithError(http.StatusUnauthorized, errors.New("bad input")).SetMeta("some input") //nolint: errcheck

	assert.Equal(t, http.StatusUnauthorized, rec.Code)
	assert.Equal(t, abortIndex, ctx.index)
	assert.True(t, ctx.IsAborted())
}
// TestContextClientIP exercises ClientIP across the full matrix of inputs:
// unix-socket addresses, legacy header defaults, trusted-proxy CIDR/hostname
// configuration, the TrustedPlatform headers (App Engine, Cloudflare, Fly.io,
// custom), and malformed values. The test mutates one shared Context in a
// fixed order; resetContextForClientIPTests re-arms the baseline between
// sections, so statement order here is load-bearing.
func TestContextClientIP(t *testing.T) {
	c, _ := CreateTestContext(httptest.NewRecorder())
	c.Request, _ = http.NewRequest(http.MethodPost, "/", nil)
	c.engine.trustedCIDRs, _ = c.engine.prepareTrustedCIDRs()
	resetContextForClientIPTests(c)

	// unix address
	addr := &net.UnixAddr{Net: "unix", Name: "@"}
	c.Request = c.Request.WithContext(context.WithValue(c.Request.Context(), http.LocalAddrContextKey, addr))
	c.Request.RemoteAddr = addr.String()
	assert.Equal(t, "20.20.20.20", c.ClientIP())

	// reset
	c.Request = c.Request.WithContext(context.Background())
	resetContextForClientIPTests(c)

	// Legacy tests (validating that the defaults don't break the
	// (insecure!) old behaviour)
	assert.Equal(t, "20.20.20.20", c.ClientIP())

	c.Request.Header.Del("X-Forwarded-For")
	assert.Equal(t, "10.10.10.10", c.ClientIP())

	c.Request.Header.Set("X-Forwarded-For", "30.30.30.30 ")
	assert.Equal(t, "30.30.30.30", c.ClientIP())

	c.Request.Header.Del("X-Forwarded-For")
	c.Request.Header.Del("X-Real-IP")
	c.engine.TrustedPlatform = PlatformGoogleAppEngine
	assert.Equal(t, "50.50.50.50", c.ClientIP())

	c.Request.Header.Del("X-Appengine-Remote-Addr")
	assert.Equal(t, "40.40.40.40", c.ClientIP())

	// no port
	c.Request.RemoteAddr = "50.50.50.50"
	assert.Empty(t, c.ClientIP())

	// Tests exercising the TrustedProxies functionality
	resetContextForClientIPTests(c)

	// IPv6 support
	c.Request.RemoteAddr = fmt.Sprintf("[%s]:12345", localhostIPv6)
	assert.Equal(t, "20.20.20.20", c.ClientIP())

	resetContextForClientIPTests(c)
	// No trusted proxies
	_ = c.engine.SetTrustedProxies([]string{})
	c.engine.RemoteIPHeaders = []string{"X-Forwarded-For"}
	assert.Equal(t, "40.40.40.40", c.ClientIP())

	// Disabled TrustedProxies feature
	_ = c.engine.SetTrustedProxies(nil)
	assert.Equal(t, "40.40.40.40", c.ClientIP())

	// Last proxy is trusted, but the RemoteAddr is not
	_ = c.engine.SetTrustedProxies([]string{"30.30.30.30"})
	assert.Equal(t, "40.40.40.40", c.ClientIP())

	// Only trust RemoteAddr
	_ = c.engine.SetTrustedProxies([]string{"40.40.40.40"})
	assert.Equal(t, "30.30.30.30", c.ClientIP())

	// All steps are trusted
	_ = c.engine.SetTrustedProxies([]string{"40.40.40.40", "30.30.30.30", "20.20.20.20"})
	assert.Equal(t, "20.20.20.20", c.ClientIP())

	// Use CIDR
	_ = c.engine.SetTrustedProxies([]string{"40.40.25.25/16", "30.30.30.30"})
	assert.Equal(t, "20.20.20.20", c.ClientIP())

	// Use hostname that resolves to all the proxies
	_ = c.engine.SetTrustedProxies([]string{"foo"})
	assert.Equal(t, "40.40.40.40", c.ClientIP())

	// Use hostname that returns an error
	_ = c.engine.SetTrustedProxies([]string{"bar"})
	assert.Equal(t, "40.40.40.40", c.ClientIP())

	// X-Forwarded-For has a non-IP element
	_ = c.engine.SetTrustedProxies([]string{"40.40.40.40"})
	c.Request.Header.Set("X-Forwarded-For", " blah ")
	assert.Equal(t, "40.40.40.40", c.ClientIP())

	// Result from LookupHost has non-IP element. This should never
	// happen, but we should test it to make sure we handle it
	// gracefully.
	_ = c.engine.SetTrustedProxies([]string{"baz"})
	c.Request.Header.Set("X-Forwarded-For", " 30.30.30.30 ")
	assert.Equal(t, "40.40.40.40", c.ClientIP())

	_ = c.engine.SetTrustedProxies([]string{"40.40.40.40"})
	c.Request.Header.Del("X-Forwarded-For")
	c.engine.RemoteIPHeaders = []string{"X-Forwarded-For", "X-Real-IP"}
	assert.Equal(t, "10.10.10.10", c.ClientIP())

	c.engine.RemoteIPHeaders = []string{}
	c.engine.TrustedPlatform = PlatformGoogleAppEngine
	assert.Equal(t, "50.50.50.50", c.ClientIP())

	// Use custom TrustedPlatform header
	c.engine.TrustedPlatform = "X-CDN-IP"
	c.Request.Header.Set("X-CDN-IP", "80.80.80.80")
	assert.Equal(t, "80.80.80.80", c.ClientIP())
	// wrong header
	c.engine.TrustedPlatform = "X-Wrong-Header"
	assert.Equal(t, "40.40.40.40", c.ClientIP())

	c.Request.Header.Del("X-CDN-IP")
	// TrustedPlatform is empty
	c.engine.TrustedPlatform = ""
	assert.Equal(t, "40.40.40.40", c.ClientIP())

	// Test the legacy flag
	c.engine.AppEngine = true
	assert.Equal(t, "50.50.50.50", c.ClientIP())
	c.engine.AppEngine = false
	c.engine.TrustedPlatform = PlatformGoogleAppEngine

	c.Request.Header.Del("X-Appengine-Remote-Addr")
	assert.Equal(t, "40.40.40.40", c.ClientIP())

	c.engine.TrustedPlatform = PlatformCloudflare
	assert.Equal(t, "60.60.60.60", c.ClientIP())

	c.Request.Header.Del("CF-Connecting-IP")
	assert.Equal(t, "40.40.40.40", c.ClientIP())

	c.engine.TrustedPlatform = PlatformFlyIO
	assert.Equal(t, "70.70.70.70", c.ClientIP())

	c.Request.Header.Del("Fly-Client-IP")
	assert.Equal(t, "40.40.40.40", c.ClientIP())

	c.engine.TrustedPlatform = ""

	// no port
	c.Request.RemoteAddr = "50.50.50.50"
	assert.Empty(t, c.ClientIP())
}
// resetContextForClientIPTests re-arms c with the canonical fixture used by
// TestContextClientIP: proxy headers (X-Real-IP, X-Forwarded-For), platform
// headers (App Engine, Cloudflare, Fly.io), a RemoteAddr with a port, and
// default engine trust settings. The leading/trailing spaces in the header
// values are deliberate — ClientIP is expected to trim them.
func resetContextForClientIPTests(c *Context) {
	c.Request.Header.Set("X-Real-IP", " 10.10.10.10 ")
	c.Request.Header.Set("X-Forwarded-For", " 20.20.20.20, 30.30.30.30")
	c.Request.Header.Set("X-Appengine-Remote-Addr", "50.50.50.50")
	c.Request.Header.Set("CF-Connecting-IP", "60.60.60.60")
	c.Request.Header.Set("Fly-Client-IP", "70.70.70.70")
	c.Request.RemoteAddr = " 40.40.40.40:42123 "
	c.engine.TrustedPlatform = ""
	c.engine.trustedCIDRs = defaultTrustedCIDRs
	c.engine.AppEngine = false
}
// TestContextContentType verifies that ContentType strips parameters such as
// charset from the request's Content-Type header.
func TestContextContentType(t *testing.T) {
	ctx, _ := CreateTestContext(httptest.NewRecorder())
	ctx.Request, _ = http.NewRequest(http.MethodPost, "/", nil)
	ctx.Request.Header.Set("Content-Type", "application/json; charset=utf-8")

	assert.Equal(t, "application/json", ctx.ContentType())
}
// TestContextBindRequestTooLarge verifies that BindJSON fails and aborts when
// the body exceeds the MaxBytesReader limit, leaving the target untouched.
func TestContextBindRequestTooLarge(t *testing.T) {
	// When using go-json as JSON encoder, they do not propagate the http.MaxBytesError error
	// The response will fail with a generic 400 instead of 413
	// https://github.com/goccy/go-json/issues/485
	var expectedCode int
	switch json.Package {
	case "github.com/goccy/go-json":
		expectedCode = http.StatusBadRequest
	default:
		expectedCode = http.StatusRequestEntityTooLarge
	}

	w := httptest.NewRecorder()
	c, _ := CreateTestContext(w)
	c.Request, _ = http.NewRequest(http.MethodPost, "/", strings.NewReader(`{"foo":"bar", "bar":"foo"}`))
	// Cap the body at 10 bytes — smaller than the payload above.
	c.Request.Body = http.MaxBytesReader(c.Writer, c.Request.Body, 10)

	var obj struct {
		Foo string `json:"foo"`
		Bar string `json:"bar"`
	}
	require.Error(t, c.BindJSON(&obj))
	c.Writer.WriteHeaderNow()

	assert.Empty(t, obj.Bar)
	assert.Empty(t, obj.Foo)
	assert.Equal(t, expectedCode, w.Code)
	assert.True(t, c.IsAborted())
}
// TestContextAutoBindJSON verifies that Bind selects the JSON binding from
// the Content-Type header and populates the target without recording errors.
func TestContextAutoBindJSON(t *testing.T) {
	ctx, _ := CreateTestContext(httptest.NewRecorder())
	ctx.Request, _ = http.NewRequest(http.MethodPost, "/", strings.NewReader(`{"foo":"bar", "bar":"foo"}`))
	ctx.Request.Header.Add("Content-Type", MIMEJSON)

	var got struct {
		Foo string `json:"foo"`
		Bar string `json:"bar"`
	}
	require.NoError(t, ctx.Bind(&got))

	assert.Equal(t, "foo", got.Bar)
	assert.Equal(t, "bar", got.Foo)
	assert.Empty(t, ctx.Errors)
}
// TestContextBindWithJSON verifies that BindJSON parses the body as JSON
// regardless of the (deliberately wrong) Content-Type header.
func TestContextBindWithJSON(t *testing.T) {
	rec := httptest.NewRecorder()
	ctx, _ := CreateTestContext(rec)
	ctx.Request, _ = http.NewRequest(http.MethodPost, "/", strings.NewReader(`{"foo":"bar", "bar":"foo"}`))
	ctx.Request.Header.Add("Content-Type", MIMEXML) // set fake content-type

	var got struct {
		Foo string `json:"foo"`
		Bar string `json:"bar"`
	}
	require.NoError(t, ctx.BindJSON(&got))

	assert.Equal(t, "foo", got.Bar)
	assert.Equal(t, "bar", got.Foo)
	assert.Equal(t, 0, rec.Body.Len())
}
// TestContextBindWithXML verifies that BindXML parses the body as XML
// regardless of the Content-Type header.
func TestContextBindWithXML(t *testing.T) {
	rec := httptest.NewRecorder()
	ctx, _ := CreateTestContext(rec)
	ctx.Request, _ = http.NewRequest(http.MethodPost, "/", strings.NewReader(`<?xml version="1.0" encoding="UTF-8"?>
	<root>
		<foo>FOO</foo>
		<bar>BAR</bar>
	</root>`))
	ctx.Request.Header.Add("Content-Type", MIMEXML) // set fake content-type

	var got struct {
		Foo string `xml:"foo"`
		Bar string `xml:"bar"`
	}
	require.NoError(t, ctx.BindXML(&got))

	assert.Equal(t, "FOO", got.Foo)
	assert.Equal(t, "BAR", got.Bar)
	assert.Equal(t, 0, rec.Body.Len())
}
// TestContextBindPlain verifies that BindPlain fills both string and []byte
// targets from a text/plain body.
func TestContextBindPlain(t *testing.T) {
	rec := httptest.NewRecorder()
	ctx, _ := CreateTestContext(rec)

	// string target
	ctx.Request, _ = http.NewRequest(http.MethodPost, "/", bytes.NewBufferString(`test string`))
	ctx.Request.Header.Add("Content-Type", MIMEPlain)

	var asString string
	require.NoError(t, ctx.BindPlain(&asString))
	assert.Equal(t, "test string", asString)
	assert.Equal(t, 0, rec.Body.Len())

	// []byte target
	ctx.Request, _ = http.NewRequest(http.MethodPost, "/", bytes.NewBufferString(`test []byte`))
	ctx.Request.Header.Add("Content-Type", MIMEPlain)

	var asBytes []byte
	require.NoError(t, ctx.BindPlain(&asBytes))
	assert.Equal(t, []byte("test []byte"), asBytes)
	assert.Equal(t, 0, rec.Body.Len())
}
// TestContextBindHeader verifies that BindHeader maps request headers onto
// struct fields via `header` tags, including numeric conversion.
func TestContextBindHeader(t *testing.T) {
	rec := httptest.NewRecorder()
	ctx, _ := CreateTestContext(rec)
	ctx.Request, _ = http.NewRequest(http.MethodPost, "/", nil)
	ctx.Request.Header.Add("rate", "8000")
	ctx.Request.Header.Add("domain", "music")
	ctx.Request.Header.Add("limit", "1000")

	var got struct {
		Rate   int    `header:"Rate"`
		Domain string `header:"Domain"`
		Limit  int    `header:"limit"`
	}
	require.NoError(t, ctx.BindHeader(&got))

	assert.Equal(t, 8000, got.Rate)
	assert.Equal(t, "music", got.Domain)
	assert.Equal(t, 1000, got.Limit)
	assert.Equal(t, 0, rec.Body.Len())
}
// TestContextBindWithQuery verifies that BindQuery reads only the query
// string, ignoring the request body.
func TestContextBindWithQuery(t *testing.T) {
	rec := httptest.NewRecorder()
	ctx, _ := CreateTestContext(rec)
	ctx.Request, _ = http.NewRequest(http.MethodPost, "/?foo=bar&bar=foo", strings.NewReader("foo=unused"))

	var got struct {
		Foo string `form:"foo"`
		Bar string `form:"bar"`
	}
	require.NoError(t, ctx.BindQuery(&got))

	assert.Equal(t, "foo", got.Bar)
	assert.Equal(t, "bar", got.Foo)
	assert.Equal(t, 0, rec.Body.Len())
}
// TestContextBindWithYAML verifies that BindYAML parses the body as YAML
// regardless of the Content-Type header.
func TestContextBindWithYAML(t *testing.T) {
	rec := httptest.NewRecorder()
	ctx, _ := CreateTestContext(rec)
	ctx.Request, _ = http.NewRequest(http.MethodPost, "/", strings.NewReader("foo: bar\nbar: foo"))
	ctx.Request.Header.Add("Content-Type", MIMEXML) // set fake content-type

	var got struct {
		Foo string `yaml:"foo"`
		Bar string `yaml:"bar"`
	}
	require.NoError(t, ctx.BindYAML(&got))

	assert.Equal(t, "foo", got.Bar)
	assert.Equal(t, "bar", got.Foo)
	assert.Equal(t, 0, rec.Body.Len())
}
// TestContextBindWithTOML verifies that Context.BindTOML parses the body as
// TOML regardless of the Content-Type header (deliberately set to XML here).
func TestContextBindWithTOML(t *testing.T) {
	w := httptest.NewRecorder()
	c, _ := CreateTestContext(w)
	c.Request, _ = http.NewRequest(http.MethodPost, "/", strings.NewReader("foo = 'bar'\nbar = 'foo'"))
	c.Request.Header.Add("Content-Type", MIMEXML) // set fake content-type
	var obj struct {
		Foo string `toml:"foo"`
		Bar string `toml:"bar"`
	}
	require.NoError(t, c.BindTOML(&obj))
	assert.Equal(t, "foo", obj.Bar)
	assert.Equal(t, "bar", obj.Foo)
	assert.Equal(t, 0, w.Body.Len())
}
// TestContextBadAutoBind verifies that Context.Bind, given malformed JSON
// (the body is missing its opening brace), returns an error, aborts the
// context, and writes a 400 Bad Request status.
func TestContextBadAutoBind(t *testing.T) {
	w := httptest.NewRecorder()
	c, _ := CreateTestContext(w)
	c.Request, _ = http.NewRequest(http.MethodPost, "http://example.com", strings.NewReader("\"foo\":\"bar\", \"bar\":\"foo\"}"))
	c.Request.Header.Add("Content-Type", MIMEJSON)
	var obj struct {
		Foo string `json:"foo"`
		Bar string `json:"bar"`
	}
	assert.False(t, c.IsAborted())
	require.Error(t, c.Bind(&obj))
	c.Writer.WriteHeaderNow()
	assert.Empty(t, obj.Bar)
	assert.Empty(t, obj.Foo)
	assert.Equal(t, http.StatusBadRequest, w.Code)
	// Unlike ShouldBind, Bind aborts the context on failure.
	assert.True(t, c.IsAborted())
}
// TestContextAutoShouldBindJSON verifies that Context.ShouldBind selects the
// JSON binding from the Content-Type header and records no context errors.
func TestContextAutoShouldBindJSON(t *testing.T) {
	c, _ := CreateTestContext(httptest.NewRecorder())
	c.Request, _ = http.NewRequest(http.MethodPost, "/", strings.NewReader(`{"foo":"bar", "bar":"foo"}`))
	c.Request.Header.Add("Content-Type", MIMEJSON)
	var obj struct {
		Foo string `json:"foo"`
		Bar string `json:"bar"`
	}
	require.NoError(t, c.ShouldBind(&obj))
	assert.Equal(t, "foo", obj.Bar)
	assert.Equal(t, "bar", obj.Foo)
	assert.Empty(t, c.Errors)
}
// TestContextShouldBindWithJSON verifies that Context.ShouldBindJSON parses
// the body as JSON regardless of the Content-Type header (set to XML here).
func TestContextShouldBindWithJSON(t *testing.T) {
	w := httptest.NewRecorder()
	c, _ := CreateTestContext(w)
	c.Request, _ = http.NewRequest(http.MethodPost, "/", strings.NewReader(`{"foo":"bar", "bar":"foo"}`))
	c.Request.Header.Add("Content-Type", MIMEXML) // set fake content-type
	var obj struct {
		Foo string `json:"foo"`
		Bar string `json:"bar"`
	}
	require.NoError(t, c.ShouldBindJSON(&obj))
	assert.Equal(t, "foo", obj.Bar)
	assert.Equal(t, "bar", obj.Foo)
	assert.Equal(t, 0, w.Body.Len())
}
// TestContextShouldBindWithXML verifies that Context.ShouldBindXML parses an
// XML document from the body into a struct via `xml` tags.
func TestContextShouldBindWithXML(t *testing.T) {
	w := httptest.NewRecorder()
	c, _ := CreateTestContext(w)
	c.Request, _ = http.NewRequest(http.MethodPost, "/", strings.NewReader(`<?xml version="1.0" encoding="UTF-8"?>
	<root>
		<foo>FOO</foo>
		<bar>BAR</bar>
	</root>`))
	c.Request.Header.Add("Content-Type", MIMEXML) // set fake content-type
	var obj struct {
		Foo string `xml:"foo"`
		Bar string `xml:"bar"`
	}
	require.NoError(t, c.ShouldBindXML(&obj))
	assert.Equal(t, "FOO", obj.Foo)
	assert.Equal(t, "BAR", obj.Bar)
	assert.Equal(t, 0, w.Body.Len())
}
// TestContextShouldBindPlain verifies that Context.ShouldBindPlain copies the
// raw body into both supported target types: *string and *[]byte.
// NOTE: the second bind reuses the same Context with a fresh Request, so the
// two sections must run in this order.
func TestContextShouldBindPlain(t *testing.T) {
	// string
	w := httptest.NewRecorder()
	c, _ := CreateTestContext(w)
	c.Request, _ = http.NewRequest(http.MethodPost, "/", bytes.NewBufferString(`test string`))
	c.Request.Header.Add("Content-Type", MIMEPlain)
	var s string
	require.NoError(t, c.ShouldBindPlain(&s))
	assert.Equal(t, "test string", s)
	assert.Equal(t, 0, w.Body.Len())
	// []byte
	c.Request, _ = http.NewRequest(http.MethodPost, "/", bytes.NewBufferString(`test []byte`))
	c.Request.Header.Add("Content-Type", MIMEPlain)
	var bs []byte
	require.NoError(t, c.ShouldBindPlain(&bs))
	assert.Equal(t, []byte("test []byte"), bs)
	assert.Equal(t, 0, w.Body.Len())
}
// TestContextShouldBindHeader verifies that Context.ShouldBindHeader maps
// request headers onto a struct via `header` tags, including case-insensitive
// tag matching and int conversion, without touching the response body.
func TestContextShouldBindHeader(t *testing.T) {
	w := httptest.NewRecorder()
	c, _ := CreateTestContext(w)
	c.Request, _ = http.NewRequest(http.MethodPost, "/", nil)
	c.Request.Header.Add("rate", "8000")
	c.Request.Header.Add("domain", "music")
	c.Request.Header.Add("limit", "1000")
	var testHeader struct {
		Rate   int    `header:"Rate"`
		Domain string `header:"Domain"`
		Limit  int    `header:"limit"`
	}
	require.NoError(t, c.ShouldBindHeader(&testHeader))
	assert.Equal(t, 8000, testHeader.Rate)
	assert.Equal(t, "music", testHeader.Domain)
	assert.Equal(t, 1000, testHeader.Limit)
	assert.Equal(t, 0, w.Body.Len())
}
// TestContextShouldBindWithQuery verifies that Context.ShouldBindQuery binds
// only the URL query string (body "foo=unused" is ignored) and that query
// parameter names are matched case-sensitively ("foo" vs "Foo").
func TestContextShouldBindWithQuery(t *testing.T) {
	w := httptest.NewRecorder()
	c, _ := CreateTestContext(w)
	c.Request, _ = http.NewRequest(http.MethodPost, "/?foo=bar&bar=foo&Foo=bar1&Bar=foo1", strings.NewReader("foo=unused"))
	var obj struct {
		Foo  string `form:"foo"`
		Bar  string `form:"bar"`
		Foo1 string `form:"Foo"`
		Bar1 string `form:"Bar"`
	}
	require.NoError(t, c.ShouldBindQuery(&obj))
	assert.Equal(t, "foo", obj.Bar)
	assert.Equal(t, "bar", obj.Foo)
	assert.Equal(t, "foo1", obj.Bar1)
	assert.Equal(t, "bar1", obj.Foo1)
	assert.Equal(t, 0, w.Body.Len())
}
// TestContextShouldBindWithYAML verifies that Context.ShouldBindYAML parses
// the body as YAML regardless of the Content-Type header (set to XML here).
func TestContextShouldBindWithYAML(t *testing.T) {
	w := httptest.NewRecorder()
	c, _ := CreateTestContext(w)
	c.Request, _ = http.NewRequest(http.MethodPost, "/", strings.NewReader("foo: bar\nbar: foo"))
	c.Request.Header.Add("Content-Type", MIMEXML) // set fake content-type
	var obj struct {
		Foo string `yaml:"foo"`
		Bar string `yaml:"bar"`
	}
	require.NoError(t, c.ShouldBindYAML(&obj))
	assert.Equal(t, "foo", obj.Bar)
	assert.Equal(t, "bar", obj.Foo)
	assert.Equal(t, 0, w.Body.Len())
}
// TestContextShouldBindWithTOML verifies that Context.ShouldBindTOML parses a
// TOML body (with varying whitespace around '=') into a struct via `toml` tags.
func TestContextShouldBindWithTOML(t *testing.T) {
	w := httptest.NewRecorder()
	c, _ := CreateTestContext(w)
	c.Request, _ = http.NewRequest(http.MethodPost, "/", strings.NewReader("foo='bar'\nbar= 'foo'"))
	c.Request.Header.Add("Content-Type", MIMETOML) // matching content-type (ShouldBindTOML ignores it anyway)
	var obj struct {
		Foo string `toml:"foo"`
		Bar string `toml:"bar"`
	}
	require.NoError(t, c.ShouldBindTOML(&obj))
	assert.Equal(t, "foo", obj.Bar)
	assert.Equal(t, "bar", obj.Foo)
	assert.Equal(t, 0, w.Body.Len())
}
// TestContextBadAutoShouldBind verifies that Context.ShouldBind, given
// malformed JSON, returns an error but — unlike Bind — does NOT abort the
// context or write a status code.
func TestContextBadAutoShouldBind(t *testing.T) {
	w := httptest.NewRecorder()
	c, _ := CreateTestContext(w)
	c.Request, _ = http.NewRequest(http.MethodPost, "http://example.com", strings.NewReader(`"foo":"bar", "bar":"foo"}`))
	c.Request.Header.Add("Content-Type", MIMEJSON)
	var obj struct {
		Foo string `json:"foo"`
		Bar string `json:"bar"`
	}
	assert.False(t, c.IsAborted())
	require.Error(t, c.ShouldBind(&obj))
	assert.Empty(t, obj.Bar)
	assert.Empty(t, obj.Foo)
	assert.False(t, c.IsAborted())
}
// TestContextShouldBindBodyWith verifies that Context.ShouldBindBodyWith
// caches the request body so it can be bound more than once: for each
// binding/body combination, the matching target type binds successfully and
// the mismatched one fails — on the same request, which would be impossible
// if the body were consumed by the first bind.
func TestContextShouldBindBodyWith(t *testing.T) {
	type typeA struct {
		Foo string `json:"foo" xml:"foo" binding:"required"`
	}
	type typeB struct {
		Bar string `json:"bar" xml:"bar" binding:"required"`
	}
	for _, tt := range []struct {
		name               string
		bindingA, bindingB binding.BindingBody
		bodyA, bodyB       string
	}{
		{
			name:     "JSON & JSON",
			bindingA: binding.JSON,
			bindingB: binding.JSON,
			bodyA:    `{"foo":"FOO"}`,
			bodyB:    `{"bar":"BAR"}`,
		},
		{
			name:     "JSON & XML",
			bindingA: binding.JSON,
			bindingB: binding.XML,
			bodyA:    `{"foo":"FOO"}`,
			bodyB: `<?xml version="1.0" encoding="UTF-8"?>
<root>
<bar>BAR</bar>
</root>`,
		},
		{
			name:     "XML & XML",
			bindingA: binding.XML,
			bindingB: binding.XML,
			bodyA: `<?xml version="1.0" encoding="UTF-8"?>
<root>
<foo>FOO</foo>
</root>`,
			bodyB: `<?xml version="1.0" encoding="UTF-8"?>
<root>
<bar>BAR</bar>
</root>`,
		},
	} {
		t.Logf("testing: %s", tt.name)
		// bodyA to typeA and typeB
		{
			w := httptest.NewRecorder()
			c, _ := CreateTestContext(w)
			c.Request, _ = http.NewRequest(
				http.MethodPost, "http://example.com", strings.NewReader(tt.bodyA),
			)
			// When it binds to typeA and typeB, it finds the body is
			// not typeB but typeA.
			objA := typeA{}
			require.NoError(t, c.ShouldBindBodyWith(&objA, tt.bindingA))
			assert.Equal(t, typeA{"FOO"}, objA)
			objB := typeB{}
			require.Error(t, c.ShouldBindBodyWith(&objB, tt.bindingB))
			assert.NotEqual(t, typeB{"BAR"}, objB)
		}
		// bodyB to typeA and typeB
		{
			// When it binds to typeA and typeB, it finds the body is
			// not typeA but typeB.
			w := httptest.NewRecorder()
			c, _ := CreateTestContext(w)
			c.Request, _ = http.NewRequest(
				http.MethodPost, "http://example.com", strings.NewReader(tt.bodyB),
			)
			objA := typeA{}
			require.Error(t, c.ShouldBindBodyWith(&objA, tt.bindingA))
			assert.NotEqual(t, typeA{"FOO"}, objA)
			objB := typeB{}
			require.NoError(t, c.ShouldBindBodyWith(&objB, tt.bindingB))
			assert.Equal(t, typeB{"BAR"}, objB)
		}
	}
}
// TestContextShouldBindBodyWithJSON verifies that Context.ShouldBindBodyWithJSON
// succeeds only when the cached body is actually JSON; XML, YAML, and TOML
// bodies must fail and leave the target struct zero-valued.
func TestContextShouldBindBodyWithJSON(t *testing.T) {
	for _, tt := range []struct {
		name        string
		bindingBody binding.BindingBody
		body        string
	}{
		{
			name:        " JSON & JSON-BODY ",
			bindingBody: binding.JSON,
			body:        `{"foo":"FOO"}`,
		},
		{
			name:        " JSON & XML-BODY ",
			bindingBody: binding.XML,
			body: `<?xml version="1.0" encoding="UTF-8"?>
<root>
<foo>FOO</foo>
</root>`,
		},
		{
			name:        " JSON & YAML-BODY ",
			bindingBody: binding.YAML,
			body:        `foo: FOO`,
		},
		{
			// was " JSON & TOM-BODY " — fixed copy/paste typo in the case name
			name:        " JSON & TOML-BODY ",
			bindingBody: binding.TOML,
			body:        `foo=FOO`,
		},
	} {
		t.Logf("testing: %s", tt.name)
		w := httptest.NewRecorder()
		c, _ := CreateTestContext(w)
		c.Request, _ = http.NewRequest(http.MethodPost, "/", bytes.NewBufferString(tt.body))
		type typeJSON struct {
			Foo string `json:"foo" binding:"required"`
		}
		objJSON := typeJSON{}
		if tt.bindingBody == binding.JSON {
			require.NoError(t, c.ShouldBindBodyWithJSON(&objJSON))
			assert.Equal(t, typeJSON{"FOO"}, objJSON)
		}
		if tt.bindingBody == binding.XML {
			require.Error(t, c.ShouldBindBodyWithJSON(&objJSON))
			assert.Equal(t, typeJSON{}, objJSON)
		}
		if tt.bindingBody == binding.YAML {
			require.Error(t, c.ShouldBindBodyWithJSON(&objJSON))
			assert.Equal(t, typeJSON{}, objJSON)
		}
		if tt.bindingBody == binding.TOML {
			require.Error(t, c.ShouldBindBodyWithJSON(&objJSON))
			assert.Equal(t, typeJSON{}, objJSON)
		}
	}
}
// TestContextShouldBindBodyWithXML verifies that Context.ShouldBindBodyWithXML
// succeeds only when the cached body is actually XML; JSON, YAML, and TOML
// bodies must fail and leave the target struct zero-valued.
func TestContextShouldBindBodyWithXML(t *testing.T) {
	for _, tt := range []struct {
		name        string
		bindingBody binding.BindingBody
		body        string
	}{
		{
			name:        " XML & JSON-BODY ",
			bindingBody: binding.JSON,
			body:        `{"foo":"FOO"}`,
		},
		{
			name:        " XML & XML-BODY ",
			bindingBody: binding.XML,
			body: `<?xml version="1.0" encoding="UTF-8"?>
<root>
<foo>FOO</foo>
</root>`,
		},
		{
			name:        " XML & YAML-BODY ",
			bindingBody: binding.YAML,
			body:        `foo: FOO`,
		},
		{
			// was " XML & TOM-BODY " — fixed copy/paste typo in the case name
			name:        " XML & TOML-BODY ",
			bindingBody: binding.TOML,
			body:        `foo=FOO`,
		},
	} {
		t.Logf("testing: %s", tt.name)
		w := httptest.NewRecorder()
		c, _ := CreateTestContext(w)
		c.Request, _ = http.NewRequest(http.MethodPost, "/", bytes.NewBufferString(tt.body))
		type typeXML struct {
			Foo string `xml:"foo" binding:"required"`
		}
		objXML := typeXML{}
		if tt.bindingBody == binding.JSON {
			require.Error(t, c.ShouldBindBodyWithXML(&objXML))
			assert.Equal(t, typeXML{}, objXML)
		}
		if tt.bindingBody == binding.XML {
			require.NoError(t, c.ShouldBindBodyWithXML(&objXML))
			assert.Equal(t, typeXML{"FOO"}, objXML)
		}
		if tt.bindingBody == binding.YAML {
			require.Error(t, c.ShouldBindBodyWithXML(&objXML))
			assert.Equal(t, typeXML{}, objXML)
		}
		if tt.bindingBody == binding.TOML {
			require.Error(t, c.ShouldBindBodyWithXML(&objXML))
			assert.Equal(t, typeXML{}, objXML)
		}
	}
}
// TestContextShouldBindBodyWithYAML verifies that Context.ShouldBindBodyWithYAML
// succeeds for YAML bodies — and for JSON bodies too, since YAML is a superset
// of JSON — while XML and TOML bodies must fail and leave the struct zero-valued.
func TestContextShouldBindBodyWithYAML(t *testing.T) {
	for _, tt := range []struct {
		name        string
		bindingBody binding.BindingBody
		body        string
	}{
		{
			name:        " YAML & JSON-BODY ",
			bindingBody: binding.JSON,
			body:        `{"foo":"FOO"}`,
		},
		{
			name:        " YAML & XML-BODY ",
			bindingBody: binding.XML,
			body: `<?xml version="1.0" encoding="UTF-8"?>
<root>
<foo>FOO</foo>
</root>`,
		},
		{
			name:        " YAML & YAML-BODY ",
			bindingBody: binding.YAML,
			body:        `foo: FOO`,
		},
		{
			// was " YAML & TOM-BODY " — fixed copy/paste typo in the case name
			name:        " YAML & TOML-BODY ",
			bindingBody: binding.TOML,
			body:        `foo=FOO`,
		},
	} {
		t.Logf("testing: %s", tt.name)
		w := httptest.NewRecorder()
		c, _ := CreateTestContext(w)
		c.Request, _ = http.NewRequest(http.MethodPost, "/", bytes.NewBufferString(tt.body))
		type typeYAML struct {
			Foo string `yaml:"foo" binding:"required"`
		}
		objYAML := typeYAML{}
		// YAML is a superset of JSON, so a JSON body parses as YAML too.
		if tt.bindingBody == binding.JSON {
			require.NoError(t, c.ShouldBindBodyWithYAML(&objYAML))
			assert.Equal(t, typeYAML{"FOO"}, objYAML)
		}
		if tt.bindingBody == binding.XML {
			require.Error(t, c.ShouldBindBodyWithYAML(&objYAML))
			assert.Equal(t, typeYAML{}, objYAML)
		}
		if tt.bindingBody == binding.YAML {
			require.NoError(t, c.ShouldBindBodyWithYAML(&objYAML))
			assert.Equal(t, typeYAML{"FOO"}, objYAML)
		}
		if tt.bindingBody == binding.TOML {
			require.Error(t, c.ShouldBindBodyWithYAML(&objYAML))
			assert.Equal(t, typeYAML{}, objYAML)
		}
	}
}
// TestContextShouldBindBodyWithTOML verifies that Context.ShouldBindBodyWithTOML
// succeeds only when the cached body is actually TOML; JSON, XML, and YAML
// bodies must fail and leave the target struct zero-valued.
func TestContextShouldBindBodyWithTOML(t *testing.T) {
	for _, tt := range []struct {
		name        string
		bindingBody binding.BindingBody
		body        string
	}{
		{
			name:        " TOML & JSON-BODY ",
			bindingBody: binding.JSON,
			body:        `{"foo":"FOO"}`,
		},
		{
			name:        " TOML & XML-BODY ",
			bindingBody: binding.XML,
			body: `<?xml version="1.0" encoding="UTF-8"?>
<root>
<foo>FOO</foo>
</root>`,
		},
		{
			name:        " TOML & YAML-BODY ",
			bindingBody: binding.YAML,
			body:        `foo: FOO`,
		},
		{
			// was " TOML & TOM-BODY " — fixed copy/paste typo in the case name
			name:        " TOML & TOML-BODY ",
			bindingBody: binding.TOML,
			body:        `foo = 'FOO'`,
		},
	} {
		t.Logf("testing: %s", tt.name)
		w := httptest.NewRecorder()
		c, _ := CreateTestContext(w)
		c.Request, _ = http.NewRequest(http.MethodPost, "/", bytes.NewBufferString(tt.body))
		type typeTOML struct {
			Foo string `toml:"foo" binding:"required"`
		}
		objTOML := typeTOML{}
		if tt.bindingBody == binding.JSON {
			require.Error(t, c.ShouldBindBodyWithTOML(&objTOML))
			assert.Equal(t, typeTOML{}, objTOML)
		}
		if tt.bindingBody == binding.XML {
			require.Error(t, c.ShouldBindBodyWithTOML(&objTOML))
			assert.Equal(t, typeTOML{}, objTOML)
		}
		if tt.bindingBody == binding.YAML {
			require.Error(t, c.ShouldBindBodyWithTOML(&objTOML))
			assert.Equal(t, typeTOML{}, objTOML)
		}
		if tt.bindingBody == binding.TOML {
			require.NoError(t, c.ShouldBindBodyWithTOML(&objTOML))
			assert.Equal(t, typeTOML{"FOO"}, objTOML)
		}
	}
}
// TestContextShouldBindBodyWithPlain verifies that Context.ShouldBindBodyWithPlain
// copies the cached raw body verbatim into a string, alongside the usual
// JSON/XML/YAML/TOML success-and-failure matrix for ShouldBindBodyWithJSON.
func TestContextShouldBindBodyWithPlain(t *testing.T) {
	for _, tt := range []struct {
		name        string
		bindingBody binding.BindingBody
		body        string
	}{
		{
			name:        " JSON & JSON-BODY ",
			bindingBody: binding.JSON,
			body:        `{"foo":"FOO"}`,
		},
		{
			name:        " JSON & XML-BODY ",
			bindingBody: binding.XML,
			body: `<?xml version="1.0" encoding="UTF-8"?>
<root>
<foo>FOO</foo>
</root>`,
		},
		{
			name:        " JSON & YAML-BODY ",
			bindingBody: binding.YAML,
			body:        `foo: FOO`,
		},
		{
			// was " JSON & TOM-BODY " — fixed copy/paste typo in the case name
			name:        " JSON & TOML-BODY ",
			bindingBody: binding.TOML,
			body:        `foo=FOO`,
		},
		{
			name:        " JSON & Plain-BODY ",
			bindingBody: binding.Plain,
			body:        `foo=FOO`,
		},
	} {
		t.Logf("testing: %s", tt.name)
		w := httptest.NewRecorder()
		c, _ := CreateTestContext(w)
		c.Request, _ = http.NewRequest(http.MethodPost, "/", bytes.NewBufferString(tt.body))
		type typeJSON struct {
			Foo string `json:"foo" binding:"required"`
		}
		objJSON := typeJSON{}
		if tt.bindingBody == binding.Plain {
			// Plain binding copies the body bytes verbatim.
			body := ""
			require.NoError(t, c.ShouldBindBodyWithPlain(&body))
			assert.Equal(t, "foo=FOO", body)
		}
		if tt.bindingBody == binding.JSON {
			require.NoError(t, c.ShouldBindBodyWithJSON(&objJSON))
			assert.Equal(t, typeJSON{"FOO"}, objJSON)
		}
		if tt.bindingBody == binding.XML {
			require.Error(t, c.ShouldBindBodyWithJSON(&objJSON))
			assert.Equal(t, typeJSON{}, objJSON)
		}
		if tt.bindingBody == binding.YAML {
			require.Error(t, c.ShouldBindBodyWithJSON(&objJSON))
			assert.Equal(t, typeJSON{}, objJSON)
		}
		if tt.bindingBody == binding.TOML {
			require.Error(t, c.ShouldBindBodyWithJSON(&objJSON))
			assert.Equal(t, typeJSON{}, objJSON)
		}
	}
}
// TestContextGolangContext verifies gin.Context's implementation of the
// standard context.Context interface (Err/Done/Deadline/Value), including the
// special keys that return the request and the context itself, and lookup of
// values stored via Set.
func TestContextGolangContext(t *testing.T) {
	c, _ := CreateTestContext(httptest.NewRecorder())
	c.Request, _ = http.NewRequest(http.MethodPost, "/", strings.NewReader(`{"foo":"bar", "bar":"foo"}`))
	require.NoError(t, c.Err())
	assert.Nil(t, c.Done())
	ti, ok := c.Deadline()
	// No deadline configured: zero time and ok == false.
	assert.Equal(t, time.Time{}, ti)
	assert.False(t, ok)
	assert.Equal(t, c.Value(ContextRequestKey), c.Request)
	assert.Equal(t, c.Value(ContextKey), c)
	assert.Nil(t, c.Value("foo"))
	c.Set("foo", "bar")
	assert.Equal(t, "bar", c.Value("foo"))
	// Non-string keys that were never stored resolve to nil.
	assert.Nil(t, c.Value(1))
}
// TestWebsocketsRequired verifies Context.IsWebsocket: true for a request
// carrying the RFC 6455 upgrade headers, false for a plain request.
func TestWebsocketsRequired(t *testing.T) {
	// Example request from spec: https://tools.ietf.org/html/rfc6455#section-1.2
	c, _ := CreateTestContext(httptest.NewRecorder())
	c.Request, _ = http.NewRequest(http.MethodGet, "/chat", nil)
	c.Request.Header.Set("Host", "server.example.com")
	c.Request.Header.Set("Upgrade", "websocket")
	c.Request.Header.Set("Connection", "Upgrade")
	c.Request.Header.Set("Sec-WebSocket-Key", "dGhlIHNhbXBsZSBub25jZQ==")
	c.Request.Header.Set("Origin", "http://example.com")
	c.Request.Header.Set("Sec-WebSocket-Protocol", "chat, superchat")
	c.Request.Header.Set("Sec-WebSocket-Version", "13")
	assert.True(t, c.IsWebsocket())
	// Normal request, no websocket required.
	c, _ = CreateTestContext(httptest.NewRecorder())
	c.Request, _ = http.NewRequest(http.MethodGet, "/chat", nil)
	c.Request.Header.Set("Host", "server.example.com")
	assert.False(t, c.IsWebsocket())
}
// TestGetRequestHeaderValue verifies Context.GetHeader: a set header returns
// its value, an unset one returns the empty string.
func TestGetRequestHeaderValue(t *testing.T) {
	ctx, _ := CreateTestContext(httptest.NewRecorder())
	req, _ := http.NewRequest(http.MethodGet, "/chat", nil)
	req.Header.Set("Gin-Version", "1.0.0")
	ctx.Request = req

	assert.Equal(t, "1.0.0", ctx.GetHeader("Gin-Version"))
	assert.Empty(t, ctx.GetHeader("Connection"))
}
// TestContextGetRawData verifies that Context.GetRawData returns the request
// body bytes unchanged.
func TestContextGetRawData(t *testing.T) {
	const payload = "Fetch binary post data"

	ctx, _ := CreateTestContext(httptest.NewRecorder())
	ctx.Request, _ = http.NewRequest(http.MethodPost, "/", strings.NewReader(payload))
	ctx.Request.Header.Add("Content-Type", MIMEPOSTForm)

	raw, err := ctx.GetRawData()
	require.NoError(t, err)
	assert.Equal(t, payload, string(raw))
}
// TestContextRenderDataFromReader verifies that Context.DataFromReader streams
// the reader to the response and sets Content-Type, Content-Length, and any
// extra headers supplied by the caller.
func TestContextRenderDataFromReader(t *testing.T) {
	w := httptest.NewRecorder()
	c, _ := CreateTestContext(w)
	body := "#!PNG some raw data"
	reader := strings.NewReader(body)
	contentLength := int64(len(body))
	contentType := "image/png"
	extraHeaders := map[string]string{"Content-Disposition": `attachment; filename="gopher.png"`}
	c.DataFromReader(http.StatusOK, contentLength, contentType, reader, extraHeaders)
	assert.Equal(t, http.StatusOK, w.Code)
	assert.Equal(t, body, w.Body.String())
	assert.Equal(t, contentType, w.Header().Get("Content-Type"))
	assert.Equal(t, strconv.FormatInt(contentLength, 10), w.Header().Get("Content-Length"))
	assert.Equal(t, extraHeaders["Content-Disposition"], w.Header().Get("Content-Disposition"))
}
// TestContextRenderDataFromReaderNoHeaders verifies that Context.DataFromReader
// works with a nil extra-headers map, still setting Content-Type and
// Content-Length from its arguments.
func TestContextRenderDataFromReaderNoHeaders(t *testing.T) {
	w := httptest.NewRecorder()
	c, _ := CreateTestContext(w)
	body := "#!PNG some raw data"
	reader := strings.NewReader(body)
	contentLength := int64(len(body))
	contentType := "image/png"
	c.DataFromReader(http.StatusOK, contentLength, contentType, reader, nil)
	assert.Equal(t, http.StatusOK, w.Code)
	assert.Equal(t, body, w.Body.String())
	assert.Equal(t, contentType, w.Header().Get("Content-Type"))
	assert.Equal(t, strconv.FormatInt(contentLength, 10), w.Header().Get("Content-Length"))
}
// TestResponseRecorder wraps httptest.ResponseRecorder with a channel so tests
// can simulate the client closing the connection (see CloseNotify/closeClient).
type TestResponseRecorder struct {
	*httptest.ResponseRecorder
	closeChannel chan bool // signalled by closeClient, read via CloseNotify
}
// CloseNotify exposes the close channel, satisfying http.CloseNotifier so
// gin's Stream can detect a (simulated) client disconnect.
func (r *TestResponseRecorder) CloseNotify() <-chan bool {
	return r.closeChannel
}
// closeClient simulates the client going away by signalling the close channel.
func (r *TestResponseRecorder) closeClient() {
	r.closeChannel <- true
}
// CreateTestResponseRecorder returns a TestResponseRecorder whose close
// channel is buffered so closeClient never blocks the caller.
func CreateTestResponseRecorder() *TestResponseRecorder {
	rec := &TestResponseRecorder{
		ResponseRecorder: httptest.NewRecorder(),
		closeChannel:     make(chan bool, 1),
	}
	return rec
}
// TestContextStream verifies that Context.Stream keeps invoking the step
// function while it returns true: the first call writes "test" and returns
// true, the second writes "test" again and returns false, yielding "testtest".
func TestContextStream(t *testing.T) {
	w := CreateTestResponseRecorder()
	c, _ := CreateTestContext(w)
	stopStream := true
	c.Stream(func(w io.Writer) bool {
		// Flip the flag after the write so the stream stops on the next round.
		defer func() {
			stopStream = false
		}()
		_, err := w.Write([]byte("test"))
		require.NoError(t, err)
		return stopStream
	})
	assert.Equal(t, "testtest", w.Body.String())
}
// TestContextStreamWithClientGone verifies that Context.Stream stops when the
// client disconnects (simulated via closeClient) even though the step function
// keeps returning true — only one "test" chunk is written.
func TestContextStreamWithClientGone(t *testing.T) {
	w := CreateTestResponseRecorder()
	c, _ := CreateTestContext(w)
	c.Stream(func(writer io.Writer) bool {
		// Signal client disconnect after the first write.
		defer func() {
			w.closeClient()
		}()
		_, err := writer.Write([]byte("test"))
		require.NoError(t, err)
		return true
	})
	assert.Equal(t, "test", w.Body.String())
}
// TestContextResetInHandler verifies that calling Context.reset from inside a
// handler does not make Context.Next panic (regression guard).
func TestContextResetInHandler(t *testing.T) {
	w := CreateTestResponseRecorder()
	c, _ := CreateTestContext(w)
	c.handlers = []HandlerFunc{
		func(c *Context) { c.reset() },
	}
	assert.NotPanics(t, func() {
		c.Next()
	})
}
// TestRaceParamsContextCopy verifies that Context.Copy snapshots route params:
// a goroutine holding a copy from the first request still sees its own :name
// value after a second request has reused/overwritten the original context.
// Run under -race to catch data races on Params.
func TestRaceParamsContextCopy(t *testing.T) {
	DefaultWriter = os.Stdout
	router := Default()
	nameGroup := router.Group("/:name")
	var wg sync.WaitGroup
	wg.Add(2)
	{
		nameGroup.GET("/api", func(c *Context) {
			go func(c *Context, param string) {
				defer wg.Done()
				// First assert must be executed after the second request
				time.Sleep(50 * time.Millisecond)
				assert.Equal(t, c.Param("name"), param)
			}(c.Copy(), c.Param("name"))
		})
	}
	PerformRequest(router, http.MethodGet, "/name1/api")
	PerformRequest(router, http.MethodGet, "/name2/api")
	wg.Wait()
}
// TestContextWithKeysMutex verifies Set/Get on a zero-value Context: a stored
// key is returned with ok == true, a missing key yields nil and ok == false.
func TestContextWithKeysMutex(t *testing.T) {
	ctx := &Context{}
	ctx.Set("foo", "bar")

	got, ok := ctx.Get("foo")
	assert.Equal(t, "bar", got)
	assert.True(t, ok)

	got, ok = ctx.Get("foo2")
	assert.Nil(t, got)
	assert.False(t, ok)
}
// TestRemoteIPFail verifies that an unparseable RemoteAddr makes
// Context.RemoteIP yield a string that net.ParseIP rejects, and that the nil
// IP is not considered a trusted proxy.
func TestRemoteIPFail(t *testing.T) {
	ctx, _ := CreateTestContext(httptest.NewRecorder())
	ctx.Request, _ = http.NewRequest(http.MethodPost, "/", nil)
	ctx.Request.RemoteAddr = "[:::]:80" // malformed address

	parsed := net.ParseIP(ctx.RemoteIP())
	assert.Nil(t, parsed)
	assert.False(t, ctx.engine.isTrustedProxy(parsed))
}
// TestHasRequestContext verifies Context.hasRequestContext across every
// combination of: ContextWithFallback flag, presence of a Request, a Request
// with a nil context, and a Context with no engine at all. Only "fallback
// enabled + request with a non-nil context" returns true.
func TestHasRequestContext(t *testing.T) {
	c, _ := CreateTestContext(httptest.NewRecorder())
	assert.False(t, c.hasRequestContext(), "no request, no fallback")
	c.engine.ContextWithFallback = true
	assert.False(t, c.hasRequestContext(), "no request, has fallback")
	c.Request, _ = http.NewRequest(http.MethodGet, "/", nil)
	assert.True(t, c.hasRequestContext(), "has request, has fallback")
	c.Request, _ = http.NewRequestWithContext(nil, "", "", nil) //nolint:staticcheck
	assert.False(t, c.hasRequestContext(), "has request with nil ctx, has fallback")
	c.engine.ContextWithFallback = false
	assert.False(t, c.hasRequestContext(), "has request, no fallback")
	c = &Context{}
	assert.False(t, c.hasRequestContext(), "no request, no engine")
	c.Request, _ = http.NewRequest(http.MethodGet, "/", nil)
	assert.False(t, c.hasRequestContext(), "has request, no engine")
}
// TestContextWithFallbackDeadlineFromRequestContext verifies that with
// ContextWithFallback enabled, Context.Deadline reports no deadline when
// there is no request, and forwards the request context's deadline otherwise.
func TestContextWithFallbackDeadlineFromRequestContext(t *testing.T) {
	c, _ := CreateTestContext(httptest.NewRecorder())
	// enable ContextWithFallback feature flag
	c.engine.ContextWithFallback = true
	deadline, ok := c.Deadline()
	assert.Zero(t, deadline)
	assert.False(t, ok)
	c2, _ := CreateTestContext(httptest.NewRecorder())
	// enable ContextWithFallback feature flag
	c2.engine.ContextWithFallback = true
	c2.Request, _ = http.NewRequest(http.MethodGet, "/", nil)
	d := time.Now().Add(time.Second)
	ctx, cancel := context.WithDeadline(context.Background(), d)
	defer cancel()
	c2.Request = c2.Request.WithContext(ctx)
	deadline, ok = c2.Deadline()
	assert.Equal(t, d, deadline)
	assert.True(t, ok)
}
// TestContextWithFallbackDoneFromRequestContext verifies that with
// ContextWithFallback enabled, Context.Done is nil without a request, and
// forwards the request context's Done channel (closed here by cancel).
func TestContextWithFallbackDoneFromRequestContext(t *testing.T) {
	c, _ := CreateTestContext(httptest.NewRecorder())
	// enable ContextWithFallback feature flag
	c.engine.ContextWithFallback = true
	assert.Nil(t, c.Done())
	c2, _ := CreateTestContext(httptest.NewRecorder())
	// enable ContextWithFallback feature flag
	c2.engine.ContextWithFallback = true
	c2.Request, _ = http.NewRequest(http.MethodGet, "/", nil)
	ctx, cancel := context.WithCancel(context.Background())
	c2.Request = c2.Request.WithContext(ctx)
	cancel()
	// Receiving from a closed Done channel yields the zero value immediately.
	assert.NotNil(t, <-c2.Done())
}
// TestContextWithFallbackErrFromRequestContext verifies that with
// ContextWithFallback enabled, Context.Err is nil without a request, and
// forwards context.Canceled once the request context is cancelled.
func TestContextWithFallbackErrFromRequestContext(t *testing.T) {
	c, _ := CreateTestContext(httptest.NewRecorder())
	// enable ContextWithFallback feature flag
	c.engine.ContextWithFallback = true
	require.NoError(t, c.Err())
	c2, _ := CreateTestContext(httptest.NewRecorder())
	// enable ContextWithFallback feature flag
	c2.engine.ContextWithFallback = true
	c2.Request, _ = http.NewRequest(http.MethodGet, "/", nil)
	ctx, cancel := context.WithCancel(context.Background())
	c2.Request = c2.Request.WithContext(ctx)
	cancel()
	assert.EqualError(t, c2.Err(), context.Canceled.Error())
}
// TestContextWithFallbackValueFromRequestContext verifies that with
// ContextWithFallback enabled, Context.Value falls through to the request
// context for struct and typed-string keys, and safely returns nil when the
// request — or its context — is absent.
func TestContextWithFallbackValueFromRequestContext(t *testing.T) {
	type contextKey string
	tests := []struct {
		name             string
		getContextAndKey func() (*Context, any)
		value            any
	}{
		{
			name: "c with struct context key",
			getContextAndKey: func() (*Context, any) {
				type KeyStruct struct{} // https://staticcheck.dev/docs/checks/#SA1029
				var key KeyStruct
				c, _ := CreateTestContext(httptest.NewRecorder())
				// enable ContextWithFallback feature flag
				c.engine.ContextWithFallback = true
				c.Request, _ = http.NewRequest(http.MethodPost, "/", nil)
				c.Request = c.Request.WithContext(context.WithValue(context.TODO(), key, "value"))
				return c, key
			},
			value: "value",
		},
		{
			name: "c with string context key",
			getContextAndKey: func() (*Context, any) {
				c, _ := CreateTestContext(httptest.NewRecorder())
				// enable ContextWithFallback feature flag
				c.engine.ContextWithFallback = true
				c.Request, _ = http.NewRequest(http.MethodPost, "/", nil)
				c.Request = c.Request.WithContext(context.WithValue(context.TODO(), contextKey("key"), "value"))
				return c, contextKey("key")
			},
			value: "value",
		},
		{
			name: "c with nil http.Request",
			getContextAndKey: func() (*Context, any) {
				c, _ := CreateTestContext(httptest.NewRecorder())
				// enable ContextWithFallback feature flag
				c.engine.ContextWithFallback = true
				c.Request = nil
				return c, "key"
			},
			value: nil,
		},
		{
			name: "c with nil http.Request.Context()",
			getContextAndKey: func() (*Context, any) {
				c, _ := CreateTestContext(httptest.NewRecorder())
				// enable ContextWithFallback feature flag
				c.engine.ContextWithFallback = true
				c.Request, _ = http.NewRequest(http.MethodPost, "/", nil)
				return c, "key"
			},
			value: nil,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			c, key := tt.getContextAndKey()
			assert.Equal(t, tt.value, c.Value(key))
		})
	}
}
// TestContextCopyShouldNotCancel verifies that a copied Context remains usable
// as a context.Context after the originating request has completed: a
// goroutine uses the copy for an outbound request made strictly after the
// handler returned, and that request must still succeed.
func TestContextCopyShouldNotCancel(t *testing.T) {
	// Upstream server the async goroutine will call with the copied context.
	srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) {
		w.WriteHeader(http.StatusOK)
	}))
	defer srv.Close()
	ensureRequestIsOver := make(chan struct{})
	wg := &sync.WaitGroup{}
	r := New()
	r.GET("/", func(ginctx *Context) {
		wg.Add(1)
		ginctx = ginctx.Copy()
		// start async goroutine for calling srv
		go func() {
			defer wg.Done()
			<-ensureRequestIsOver // ensure request is done
			// Use the copied gin context as the request's context.Context.
			req, err := http.NewRequestWithContext(ginctx, http.MethodGet, srv.URL, nil)
			must(err)
			res, err := http.DefaultClient.Do(req)
			if err != nil {
				t.Error(fmt.Errorf("request error: %w", err))
				return
			}
			if res.StatusCode != http.StatusOK {
				t.Error(fmt.Errorf("unexpected status code: %s", res.Status))
			}
		}()
	})
	l, err := net.Listen("tcp", ":0")
	must(err)
	go func() {
		s := &http.Server{
			Handler: r,
		}
		must(s.Serve(l))
	}()
	addr := strings.Split(l.Addr().String(), ":")
	res, err := http.Get(fmt.Sprintf("http://%s:%s/", localhostIP, addr[len(addr)-1]))
	if err != nil {
		t.Error(fmt.Errorf("request error: %w", err))
		return
	}
	// Only now let the goroutine proceed, guaranteeing the original request
	// (and any cancellation tied to it) is fully over.
	close(ensureRequestIsOver)
	if res.StatusCode != http.StatusOK {
		t.Error(fmt.Errorf("unexpected status code: %s", res.Status))
		return
	}
	wg.Wait()
}
// TestContextAddParam verifies that Context.AddParam makes a route parameter
// retrievable through Params.Get.
func TestContextAddParam(t *testing.T) {
	const (
		key = "id"
		val = "1"
	)
	ctx := &Context{}
	ctx.AddParam(key, val)

	got, ok := ctx.Params.Get(key)
	assert.True(t, ok)
	assert.Equal(t, val, got)
}
// TestCreateTestContextWithRouteParams verifies that a context built via
// CreateTestContextOnly, when run through engine.HandleContext, resolves
// route parameters (:action/:name) just like a real request would.
func TestCreateTestContextWithRouteParams(t *testing.T) {
	w := httptest.NewRecorder()
	engine := New()
	engine.GET("/:action/:name", func(ctx *Context) {
		ctx.String(http.StatusOK, "%s %s", ctx.Param("action"), ctx.Param("name"))
	})
	c := CreateTestContextOnly(w, engine)
	c.Request, _ = http.NewRequest(http.MethodGet, "/hello/gin", nil)
	engine.HandleContext(c)
	assert.Equal(t, http.StatusOK, w.Code)
	assert.Equal(t, "hello gin", w.Body.String())
}
// interceptedWriter wraps a ResponseWriter and strips the X-Test header just
// before the status line is written (see its WriteHeader override).
type interceptedWriter struct {
	ResponseWriter
	b *bytes.Buffer
}
// WriteHeader deletes the X-Test header and then delegates to the wrapped
// writer, demonstrating a middleware-style header interception point.
func (i interceptedWriter) WriteHeader(code int) {
	i.Header().Del("X-Test")
	i.ResponseWriter.WriteHeader(code)
}
// TestInterceptedHeader verifies header-freezing semantics: once c.String has
// flushed the response, a header set afterwards by earlier middleware (X-Test)
// never reaches the client, while a header set before the flush (X-Test-2) does.
func TestInterceptedHeader(t *testing.T) {
	w := httptest.NewRecorder()
	c, r := CreateTestContext(w)
	r.Use(func(c *Context) {
		i := interceptedWriter{
			ResponseWriter: c.Writer,
			b:              bytes.NewBuffer(nil),
		}
		c.Writer = i
		c.Next()
		// Too late: the handler below already flushed the response.
		c.Header("X-Test", "overridden")
		c.Writer = i.ResponseWriter
	})
	r.GET("/", func(c *Context) {
		c.Header("X-Test", "original")
		c.Header("X-Test-2", "present")
		c.String(http.StatusOK, "hello world")
	})
	c.Request = httptest.NewRequest(http.MethodGet, "/", nil)
	r.HandleContext(c)
	// Result() has headers frozen when WriteHeaderNow() has been called
	// Compared to this time, this is when the response headers will be flushed
	// As response is flushed on c.String, the Header cannot be set by the first
	// middleware. Assert this
	assert.Empty(t, w.Result().Header.Get("X-Test"))
	assert.Equal(t, "present", w.Result().Header.Get("X-Test-2"))
}
// TestContextNext verifies Context.Next's index advancement with zero, one,
// and multiple handlers (including a nil entry that Next must skip), and that
// code after an inner c.Next() call runs on the way back up the chain.
func TestContextNext(t *testing.T) {
	c, _ := CreateTestContext(httptest.NewRecorder())
	// Test with no handlers
	c.Next()
	assert.Equal(t, int8(0), c.index)
	// Test with one handler
	c.index = -1
	c.handlers = HandlersChain{func(c *Context) {
		c.Set("key", "value")
	}}
	c.Next()
	assert.Equal(t, int8(1), c.index)
	value, exists := c.Get("key")
	assert.True(t, exists)
	assert.Equal(t, "value", value)
	// Test with multiple handlers
	c.handlers = HandlersChain{
		func(c *Context) {
			c.Set("key1", "value1")
			c.Next()
			// Runs after the rest of the chain has completed.
			c.Set("key2", "value2")
		},
		nil, // nil handlers are skipped without panicking
		func(c *Context) {
			c.Set("key3", "value3")
		},
	}
	c.index = -1
	c.Next()
	assert.Equal(t, int8(4), c.index)
	value, exists = c.Get("key1")
	assert.True(t, exists)
	assert.Equal(t, "value1", value)
	value, exists = c.Get("key2")
	assert.True(t, exists)
	assert.Equal(t, "value2", value)
	value, exists = c.Get("key3")
	assert.True(t, exists)
	assert.Equal(t, "value3", value)
}
// TestContextSetCookieData verifies Context.SetCookieData: basic attribute
// serialization, defaulting an empty Path to "/", Expires handling, the
// Partitioned attribute, and explicit SameSite=Strict / SameSite=None output.
// NOTE: all sections before the subtests reuse the same Context, so the
// Set-Cookie header read each time is the most recently added value.
func TestContextSetCookieData(t *testing.T) {
	c, _ := CreateTestContext(httptest.NewRecorder())
	c.SetSameSite(http.SameSiteLaxMode)
	var setCookie string
	// Basic cookie settings
	cookie := &http.Cookie{
		Name:     "user",
		Value:    "gin",
		MaxAge:   1,
		Path:     "/",
		Domain:   "localhost",
		Secure:   true,
		HttpOnly: true,
	}
	c.SetCookieData(cookie)
	setCookie = c.Writer.Header().Get("Set-Cookie")
	assert.Contains(t, setCookie, "user=gin")
	assert.Contains(t, setCookie, "Path=/")
	assert.Contains(t, setCookie, "Domain=localhost")
	assert.Contains(t, setCookie, "Max-Age=1")
	assert.Contains(t, setCookie, "HttpOnly")
	assert.Contains(t, setCookie, "Secure")
	// SameSite=Lax might be omitted in Go 1.24+ as it's the default
	// assert.Contains(t, setCookie, "SameSite=Lax")
	// Test that when Path is empty, "/" is automatically set
	cookie = &http.Cookie{
		Name:     "user",
		Value:    "gin",
		MaxAge:   1,
		Path:     "",
		Domain:   "localhost",
		Secure:   true,
		HttpOnly: true,
	}
	c.SetCookieData(cookie)
	setCookie = c.Writer.Header().Get("Set-Cookie")
	assert.Contains(t, setCookie, "user=gin")
	assert.Contains(t, setCookie, "Path=/")
	assert.Contains(t, setCookie, "Domain=localhost")
	assert.Contains(t, setCookie, "Max-Age=1")
	assert.Contains(t, setCookie, "HttpOnly")
	assert.Contains(t, setCookie, "Secure")
	// SameSite=Lax might be omitted in Go 1.24+ as it's the default
	// assert.Contains(t, setCookie, "SameSite=Lax")
	// Test additional cookie attributes (Expires)
	expireTime := time.Now().Add(24 * time.Hour)
	cookie = &http.Cookie{
		Name:     "user",
		Value:    "gin",
		Path:     "/",
		Domain:   "localhost",
		Expires:  expireTime,
		Secure:   true,
		HttpOnly: true,
	}
	c.SetCookieData(cookie)
	// Since the Expires value varies by time, partially verify with Contains
	setCookie = c.Writer.Header().Get("Set-Cookie")
	assert.Contains(t, setCookie, "user=gin")
	assert.Contains(t, setCookie, "Path=/")
	assert.Contains(t, setCookie, "Domain=localhost")
	assert.Contains(t, setCookie, "HttpOnly")
	assert.Contains(t, setCookie, "Secure")
	// SameSite=Lax might be omitted in Go 1.24+ as it's the default
	// assert.Contains(t, setCookie, "SameSite=Lax")
	// Test for Partitioned attribute (http.Cookie.Partitioned requires Go 1.23+)
	cookie = &http.Cookie{
		Name:        "user",
		Value:       "gin",
		Path:        "/",
		Domain:      "localhost",
		Secure:      true,
		HttpOnly:    true,
		Partitioned: true,
	}
	c.SetCookieData(cookie)
	setCookie = c.Writer.Header().Get("Set-Cookie")
	assert.Contains(t, setCookie, "user=gin")
	assert.Contains(t, setCookie, "Path=/")
	assert.Contains(t, setCookie, "Domain=localhost")
	assert.Contains(t, setCookie, "HttpOnly")
	assert.Contains(t, setCookie, "Secure")
	// SameSite=Lax might be omitted in Go 1.24+ as it's the default
	// assert.Contains(t, setCookie, "SameSite=Lax")
	// Not testing for Partitioned attribute as it may not be supported in all Go versions
	// Test that SameSiteStrictMode is explicitly included in the header
	t.Run("SameSite=Strict is included", func(t *testing.T) {
		c, _ := CreateTestContext(httptest.NewRecorder())
		cookie := &http.Cookie{
			Name:     "user",
			Value:    "gin",
			Path:     "/",
			Domain:   "localhost",
			Secure:   true,
			HttpOnly: true,
			SameSite: http.SameSiteStrictMode,
		}
		c.SetCookieData(cookie)
		setCookie := c.Writer.Header().Get("Set-Cookie")
		assert.Contains(t, setCookie, "SameSite=Strict")
	})
	// Test that SameSiteNoneMode is explicitly included in the header
	t.Run("SameSite=None is included", func(t *testing.T) {
		c, _ := CreateTestContext(httptest.NewRecorder())
		cookie := &http.Cookie{
			Name:     "user",
			Value:    "gin",
			Path:     "/",
			Domain:   "localhost",
			Secure:   true,
			HttpOnly: true,
			SameSite: http.SameSiteNoneMode,
		}
		c.SetCookieData(cookie)
		setCookie := c.Writer.Header().Get("Set-Cookie")
		assert.Contains(t, setCookie, "SameSite=None")
	})
}
// TestGetMapFromFormData verifies that bracket-notation form keys such as
// "ids[a]" are collected into a map keyed by the inner name, and that plain
// keys, malformed brackets and missing prefixes report not-found.
func TestGetMapFromFormData(t *testing.T) {
	cases := []struct {
		name      string
		form      map[string][]string
		key       string
		want      map[string]string
		wantFound bool
	}{
		{
			name: "Basic bracket notation",
			form: map[string][]string{
				"ids[a]": {"hi"},
				"ids[b]": {"3.14"},
			},
			key: "ids",
			want: map[string]string{
				"a": "hi",
				"b": "3.14",
			},
			wantFound: true,
		},
		{
			name: "Mixed data with bracket notation",
			form: map[string][]string{
				"ids[a]":     {"hi"},
				"ids[b]":     {"3.14"},
				"names[a]":   {"mike"},
				"names[b]":   {"maria"},
				"other[key]": {"value"},
				"simple":     {"data"},
			},
			key: "ids",
			want: map[string]string{
				"a": "hi",
				"b": "3.14",
			},
			wantFound: true,
		},
		{
			name: "Names key",
			form: map[string][]string{
				"ids[a]":     {"hi"},
				"ids[b]":     {"3.14"},
				"names[a]":   {"mike"},
				"names[b]":   {"maria"},
				"other[key]": {"value"},
			},
			key: "names",
			want: map[string]string{
				"a": "mike",
				"b": "maria",
			},
			wantFound: true,
		},
		{
			name: "Key not found",
			form: map[string][]string{
				"ids[a]":   {"hi"},
				"names[b]": {"maria"},
			},
			key:       "notfound",
			want:      map[string]string{},
			wantFound: false,
		},
		{
			name:      "Empty data",
			form:      map[string][]string{},
			key:       "ids",
			want:      map[string]string{},
			wantFound: false,
		},
		{
			name: "Malformed bracket notation",
			form: map[string][]string{
				"ids[a": {"hi"},    // Missing closing bracket
				"ids]b": {"3.14"},  // Missing opening bracket
				"idsab": {"value"}, // No brackets
			},
			key:       "ids",
			want:      map[string]string{},
			wantFound: false,
		},
		{
			name: "Nested bracket notation",
			form: map[string][]string{
				"ids[a][b]": {"nested"},
				"ids[c]":    {"simple"},
			},
			key: "ids",
			want: map[string]string{
				"a": "nested",
				"c": "simple",
			},
			wantFound: true,
		},
		{
			name: "Simple key without brackets",
			form: map[string][]string{
				"simple": {"data"},
				"ids[a]": {"hi"},
			},
			key:       "simple",
			want:      map[string]string{},
			wantFound: false,
		},
		{
			name: "Mixed simple and bracket keys",
			form: map[string][]string{
				"simple": {"data"},
				"ids[a]": {"hi"},
				"ids[b]": {"3.14"},
				"other":  {"value"},
			},
			key: "ids",
			want: map[string]string{
				"a": "hi",
				"b": "3.14",
			},
			wantFound: true,
		},
	}
	for _, tt := range cases {
		t.Run(tt.name, func(t *testing.T) {
			got, ok := getMapFromFormData(tt.form, tt.key)
			assert.Equal(t, tt.want, got, "result mismatch")
			assert.Equal(t, tt.wantFound, ok, "found mismatch")
		})
	}
}
func BenchmarkGetMapFromFormData(b *testing.B) {
// Test case 1: Small dataset with bracket notation
smallData := map[string][]string{
"ids[a]": {"hi"},
"ids[b]": {"3.14"},
"names[a]": {"mike"},
"names[b]": {"maria"},
}
// Test case 2: Medium dataset with mixed data
mediumData := map[string][]string{
"ids[a]": {"hi"},
"ids[b]": {"3.14"},
"ids[c]": {"test"},
"ids[d]": {"value"},
"names[a]": {"mike"},
"names[b]": {"maria"},
"names[c]": {"john"},
"names[d]": {"jane"},
"other[key1]": {"value1"},
"other[key2]": {"value2"},
"simple": {"data"},
"another": {"info"},
}
// Test case 3: Large dataset with many bracket keys
largeData := make(map[string][]string)
for i := range 100 {
key := fmt.Sprintf("ids[%d]", i)
largeData[key] = []string{fmt.Sprintf("value%d", i)}
}
for i := range 50 {
key := fmt.Sprintf("names[%d]", i)
largeData[key] = []string{fmt.Sprintf("name%d", i)}
}
for i := range 25 {
key := fmt.Sprintf("other[key%d]", i)
largeData[key] = []string{fmt.Sprintf("other%d", i)}
}
// Test case 4: Dataset with many non-matching keys (worst case)
worstCaseData := make(map[string][]string)
for i := range 100 {
key := fmt.Sprintf("nonmatching%d", i)
worstCaseData[key] = []string{fmt.Sprintf("value%d", i)}
}
worstCaseData["ids[a]"] = []string{"hi"}
worstCaseData["ids[b]"] = []string{"3.14"}
// Test case 5: Dataset with short keys (best case for early exit)
shortKeysData := map[string][]string{
"a": {"value1"},
"b": {"value2"},
"ids[a]": {"hi"},
"ids[b]": {"3.14"},
}
benchmarks := []struct {
name string
data map[string][]string
key string
}{
{"Small_Bracket", smallData, "ids"},
{"Small_Names", smallData, "names"},
{"Medium_Bracket", mediumData, "ids"},
{"Medium_Names", mediumData, "names"},
{"Medium_Other", mediumData, "other"},
{"Large_Bracket", largeData, "ids"},
{"Large_Names", largeData, "names"},
{"Large_Other", largeData, "other"},
{"WorstCase_Bracket", worstCaseData, "ids"},
{"ShortKeys_Bracket", shortKeysData, "ids"},
{"Empty_Key", smallData, "notfound"},
}
for _, bm := range benchmarks {
b.Run(bm.name, func(b *testing.B) {
b.ReportAllocs()
for b.Loop() {
_, _ = getMapFromFormData(bm.data, bm.key)
}
})
}
}
|
go
|
github
|
https://github.com/gin-gonic/gin
|
context_test.go
|
/*
** $Id: ldblib.c,v 1.104.1.4 2009/08/04 18:50:18 roberto Exp $
** Interface from Lua to its debug API
** See Copyright Notice in lua.h
*/
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#define ldblib_c
#define LUA_LIB
#include "lua.h"
#include "lauxlib.h"
#include "lualib.h"
/*
** debug.getregistry(): pushes the C registry table, which is otherwise
** unreachable from Lua code.
*/
static int db_getregistry (lua_State *L) {
  lua_pushvalue(L, LUA_REGISTRYINDEX);
  return 1;
}
/*
** debug.getmetatable(value): returns the raw metatable of any value,
** or nil when it has none.
*/
static int db_getmetatable (lua_State *L) {
  luaL_checkany(L, 1);
  if (!lua_getmetatable(L, 1)) {
    lua_pushnil(L);  /* no metatable */
  }
  return 1;
}
/*
** debug.setmetatable(value, table|nil): sets the raw metatable of any
** value; pushes the boolean result reported by lua_setmetatable.
*/
static int db_setmetatable (lua_State *L) {
  int t = lua_type(L, 2);
  luaL_argcheck(L, t == LUA_TNIL || t == LUA_TTABLE, 2,
                "nil or table expected");
  lua_settop(L, 2);  /* make sure the metatable argument is on top */
  lua_pushboolean(L, lua_setmetatable(L, 1));
  return 1;
}
/*
** debug.getfenv(o): returns the environment table of the given object.
*/
static int db_getfenv (lua_State *L) {
  luaL_checkany(L, 1);
  lua_getfenv(L, 1);
  return 1;
}
/*
** debug.setfenv(o, table): sets the environment table of the given
** object; raises an error when lua_setfenv rejects the target.
*/
static int db_setfenv (lua_State *L) {
  luaL_checktype(L, 2, LUA_TTABLE);
  lua_settop(L, 2);
  if (lua_setfenv(L, 1) == 0)
    luaL_error(L, LUA_QL("setfenv")
                  " cannot change environment of given object");
  return 1;  /* returns the object itself */
}
/*
** Helper for db_getinfo: stores string `v` under field `i` of the table
** just below the stack top.
*/
static void settabss (lua_State *L, const char *i, const char *v) {
  lua_pushstring(L, v);
  lua_setfield(L, -2, i);
}
/*
** Helper for db_getinfo: stores integer `v` under field `i` of the table
** just below the stack top.
*/
static void settabsi (lua_State *L, const char *i, int v) {
  lua_pushinteger(L, v);
  lua_setfield(L, -2, i);
}
/*
** Resolve the optional first "thread" argument shared by several debug
** functions. Returns the coroutine passed at index 1, or L itself when
** none was given; *arg receives how many stack slots that optional
** argument occupies (1 or 0), so callers can offset later indices.
*/
static lua_State *getthread (lua_State *L, int *arg) {
  if (!lua_isthread(L, 1)) {  /* no explicit coroutine: operate on L */
    *arg = 0;
    return L;
  }
  *arg = 1;
  return lua_tothread(L, 1);
}
/*
** Helper for db_getinfo ('L' and 'f' options): moves the value that
** lua_getinfo pushed onto the queried thread L1 into the result table
** (on L) under field `fname`. When L and L1 are the same state the
** value sits below the result table and must be rotated, not moved.
*/
static void treatstackoption (lua_State *L, lua_State *L1, const char *fname) {
  if (L == L1) {
    lua_pushvalue(L, -2);  /* copy the value above the table */
    lua_remove(L, -3);  /* drop the original copy */
  }
  else
    lua_xmove(L1, L, 1);  /* value lives on the other thread */
  lua_setfield(L, -2, fname);
}
/*
** debug.getinfo([thread,] level_or_func [, options]): collects the
** fields requested by `options` into a fresh table. A numeric argument
** is a stack level (returns nil when out of range); a function argument
** queries the function itself, in which case the options string is
** prefixed with '>' so lua_getinfo pops the function from L1's stack.
*/
static int db_getinfo (lua_State *L) {
  lua_Debug ar;
  int arg;
  lua_State *L1 = getthread(L, &arg);
  const char *options = luaL_optstring(L, arg+2, "flnSu");
  if (lua_isnumber(L, arg+1)) {
    if (!lua_getstack(L1, (int)lua_tointeger(L, arg+1), &ar)) {
      lua_pushnil(L);  /* level out of range */
      return 1;
    }
  }
  else if (lua_isfunction(L, arg+1)) {
    /* '>' tells lua_getinfo to take the function from the stack top */
    lua_pushfstring(L, ">%s", options);
    options = lua_tostring(L, -1);
    lua_pushvalue(L, arg+1);
    lua_xmove(L, L1, 1);  /* function must be on the queried thread */
  }
  else
    return luaL_argerror(L, arg+1, "function or level expected");
  if (!lua_getinfo(L1, options, &ar))
    return luaL_argerror(L, arg+2, "invalid option");
  lua_createtable(L, 0, 2);  /* result table */
  if (strchr(options, 'S')) {  /* source information */
    settabss(L, "source", ar.source);
    settabss(L, "short_src", ar.short_src);
    settabsi(L, "linedefined", ar.linedefined);
    settabsi(L, "lastlinedefined", ar.lastlinedefined);
    settabss(L, "what", ar.what);
  }
  if (strchr(options, 'l'))  /* current line */
    settabsi(L, "currentline", ar.currentline);
  if (strchr(options, 'u'))  /* number of upvalues */
    settabsi(L, "nups", ar.nups);
  if (strchr(options, 'n')) {  /* name information */
    settabss(L, "name", ar.name);
    settabss(L, "namewhat", ar.namewhat);
  }
  if (strchr(options, 'L'))  /* table of active lines */
    treatstackoption(L, L1, "activelines");
  if (strchr(options, 'f'))  /* the function itself */
    treatstackoption(L, L1, "func");
  return 1;  /* return table */
}
/*
** debug.getlocal([thread,] level, index): returns the name and value of
** the local variable at the given stack level and index, or nil when the
** index is out of range. An out-of-range *level* is an argument error.
*/
static int db_getlocal (lua_State *L) {
  int arg;
  lua_State *L1 = getthread(L, &arg);
  lua_Debug ar;
  const char *name;
  if (!lua_getstack(L1, luaL_checkint(L, arg+1), &ar))  /* out of range? */
    return luaL_argerror(L, arg+1, "level out of range");
  name = lua_getlocal(L1, &ar, luaL_checkint(L, arg+2));
  if (name) {
    lua_xmove(L1, L, 1);  /* move the value to the caller's stack */
    lua_pushstring(L, name);
    lua_pushvalue(L, -2);  /* reorder so name comes first */
    return 2;  /* name, value */
  }
  else {  /* no local with that index */
    lua_pushnil(L);
    return 1;
  }
}
/*
** debug.setlocal([thread,] level, index, value): assigns `value` to the
** local variable at the given level and index; returns the variable's
** name (nil when the index names no local, as lua_setlocal returns NULL).
*/
static int db_setlocal (lua_State *L) {
  int arg;
  lua_State *L1 = getthread(L, &arg);
  lua_Debug ar;
  if (!lua_getstack(L1, luaL_checkint(L, arg+1), &ar))  /* out of range? */
    return luaL_argerror(L, arg+1, "level out of range");
  luaL_checkany(L, arg+3);
  lua_settop(L, arg+3);  /* leave the new value on top */
  lua_xmove(L, L1, 1);  /* value must be on the target thread */
  lua_pushstring(L, lua_setlocal(L1, &ar, luaL_checkint(L, arg+2)));
  return 1;
}
/*
** Shared implementation of debug.getupvalue/setupvalue. `get` selects
** the operation. Returns no values for C functions or an out-of-range
** upvalue index; otherwise returns the name (and, for get, the value).
*/
static int auxupvalue (lua_State *L, int get) {
  const char *name;
  int n = luaL_checkint(L, 2);  /* upvalue index */
  luaL_checktype(L, 1, LUA_TFUNCTION);
  if (lua_iscfunction(L, 1)) return 0;  /* cannot touch C upvalues from Lua */
  name = get ? lua_getupvalue(L, 1, n) : lua_setupvalue(L, 1, n);
  if (name == NULL) return 0;  /* no such upvalue */
  lua_pushstring(L, name);
  lua_insert(L, -(get+1));  /* put the name before the value (if any) */
  return get + 1;
}
/*
** debug.getupvalue(func, index): name and value of the given upvalue.
*/
static int db_getupvalue (lua_State *L) {
  return auxupvalue(L, 1);
}
/*
** debug.setupvalue(func, index, value): assigns to the given upvalue
** and returns its name.
*/
static int db_setupvalue (lua_State *L) {
  luaL_checkany(L, 3);  /* the value to assign */
  return auxupvalue(L, 0);
}
/* address of this constant is the registry key for the hook table */
static const char KEY_HOOK = 'h';
/*
** C hook installed by debug.sethook: looks up the Lua hook function for
** the running thread in the registry hook table and calls it with the
** event name and (when available) the current line.
*/
static void hookf (lua_State *L, lua_Debug *ar) {
  static const char *const hooknames[] =
    {"call", "return", "line", "count", "tail return"};
  lua_pushlightuserdata(L, (void *)&KEY_HOOK);
  lua_rawget(L, LUA_REGISTRYINDEX);  /* get hook table */
  lua_pushlightuserdata(L, L);  /* the thread itself is the key */
  lua_rawget(L, -2);
  if (lua_isfunction(L, -1)) {  /* a Lua hook is registered */
    lua_pushstring(L, hooknames[(int)ar->event]);
    if (ar->currentline >= 0)
      lua_pushinteger(L, ar->currentline);
    else lua_pushnil(L);  /* no line information for this event */
    lua_assert(lua_getinfo(L, "lS", ar));
    lua_call(L, 2, 0);
  }
}
/*
** Translate a hook-mask string ('c', 'r', 'l' characters) plus an
** instruction count into the bitmask expected by lua_sethook.
*/
static int makemask (const char *smask, int count) {
  int mask = (count > 0) ? LUA_MASKCOUNT : 0;
  if (strchr(smask, 'c') != NULL) mask |= LUA_MASKCALL;
  if (strchr(smask, 'r') != NULL) mask |= LUA_MASKRET;
  if (strchr(smask, 'l') != NULL) mask |= LUA_MASKLINE;
  return mask;
}
/*
** Inverse of makemask: render a hook bitmask as a "crl" string into the
** caller-supplied buffer `smask` (must hold at least 4 bytes) and
** return that buffer. The count bit has no character representation.
*/
static char *unmakemask (int mask, char *smask) {
  char *p = smask;
  if (mask & LUA_MASKCALL) *p++ = 'c';
  if (mask & LUA_MASKRET) *p++ = 'r';
  if (mask & LUA_MASKLINE) *p++ = 'l';
  *p = '\0';
  return smask;
}
/*
** Push the per-state hook table (thread -> Lua hook function) stored in
** the registry under KEY_HOOK, creating and registering it on first use.
*/
static void gethooktable (lua_State *L) {
  lua_pushlightuserdata(L, (void *)&KEY_HOOK);
  lua_rawget(L, LUA_REGISTRYINDEX);
  if (!lua_istable(L, -1)) {  /* first use: create the table */
    lua_pop(L, 1);
    lua_createtable(L, 0, 1);
    lua_pushlightuserdata(L, (void *)&KEY_HOOK);
    lua_pushvalue(L, -2);
    lua_rawset(L, LUA_REGISTRYINDEX);  /* registry[&KEY_HOOK] = table */
  }
}
/*
** debug.sethook([thread,] hook, mask [, count]): installs `hook` as the
** debug hook of the given thread, remembering the Lua function in the
** registry hook table so hookf can find it. Called with no hook
** function, it removes any installed hook.
*/
static int db_sethook (lua_State *L) {
  int arg, mask, count;
  lua_Hook func;
  lua_State *L1 = getthread(L, &arg);
  if (lua_isnoneornil(L, arg+1)) {  /* no hook function given */
    lua_settop(L, arg+1);
    func = NULL; mask = 0; count = 0; /* turn off hooks */
  }
  else {
    const char *smask = luaL_checkstring(L, arg+2);
    luaL_checktype(L, arg+1, LUA_TFUNCTION);
    count = luaL_optint(L, arg+3, 0);
    func = hookf; mask = makemask(smask, count);
  }
  gethooktable(L);
  lua_pushlightuserdata(L, L1);
  lua_pushvalue(L, arg+1);  /* the Lua hook function (or nil) */
  lua_rawset(L, -3); /* set new hook */
  lua_pop(L, 1); /* remove hook table */
  lua_sethook(L1, func, mask, count); /* set hooks */
  return 0;
}
/*
** debug.gethook([thread]): returns the current hook function, its mask
** string, and its count. A hook not installed through this library is
** reported as the string "external hook".
*/
static int db_gethook (lua_State *L) {
  int arg;
  lua_State *L1 = getthread(L, &arg);
  char buff[5];  /* room for "crl" plus terminator */
  int mask = lua_gethookmask(L1);
  lua_Hook hook = lua_gethook(L1);
  if (hook != NULL && hook != hookf) /* external hook? */
    lua_pushliteral(L, "external hook");
  else {
    gethooktable(L);
    lua_pushlightuserdata(L, L1);
    lua_rawget(L, -2); /* get hook */
    lua_remove(L, -2); /* remove hook table */
  }
  lua_pushstring(L, unmakemask(mask, buff));
  lua_pushinteger(L, lua_gethookcount(L1));
  return 3;
}
/*
** debug.debug(): interactive prompt on stderr/stdin. Each input line is
** compiled and run in the global environment; errors are printed and the
** loop continues. Returns on EOF or the line "cont".
*/
static int db_debug (lua_State *L) {
  for (;;) {
    char buffer[250];
    fputs("lua_debug> ", stderr);
    if (fgets(buffer, sizeof(buffer), stdin) == 0 ||
        strcmp(buffer, "cont\n") == 0)
      return 0;
    if (luaL_loadbuffer(L, buffer, strlen(buffer), "=(debug command)") ||
        lua_pcall(L, 0, 0, 0)) {
      fputs(lua_tostring(L, -1), stderr);  /* print the error message */
      fputs("\n", stderr);
    }
    lua_settop(L, 0);  /* remove eventual returns */
  }
}
#define LEVELS1 12 /* size of the first part of the stack */
#define LEVELS2 10 /* size of the second part of the stack */
/*
** debug.traceback([thread,] [message [, level]]): builds a stack
** traceback string. Long stacks are abbreviated: the first LEVELS1
** frames and the last LEVELS2 frames are shown, with "..." in between.
** A non-string, non-nil message is returned unchanged.
*/
static int db_errorfb (lua_State *L) {
  int level;
  int firstpart = 1;  /* still before eventual `...' */
  int arg;
  lua_State *L1 = getthread(L, &arg);
  lua_Debug ar;
  if (lua_isnumber(L, arg+2)) {  /* explicit starting level */
    level = (int)lua_tointeger(L, arg+2);
    lua_pop(L, 1);
  }
  else
    level = (L == L1) ? 1 : 0;  /* level 0 may be this own function */
  if (lua_gettop(L) == arg)  /* no message */
    lua_pushliteral(L, "");
  else if (!lua_isstring(L, arg+1)) return 1;  /* message is not a string */
  else lua_pushliteral(L, "\n");
  lua_pushliteral(L, "stack traceback:");
  while (lua_getstack(L1, level++, &ar)) {
    if (level > LEVELS1 && firstpart) {
      /* no more than `LEVELS2' more levels? */
      if (!lua_getstack(L1, level+LEVELS2, &ar))
        level--;  /* keep going */
      else {
        lua_pushliteral(L, "\n\t...");  /* too many levels */
        while (lua_getstack(L1, level+LEVELS2, &ar))  /* find last levels */
          level++;
      }
      firstpart = 0;
      continue;
    }
    lua_pushliteral(L, "\n\t");
    lua_getinfo(L1, "Snl", &ar);
    lua_pushfstring(L, "%s:", ar.short_src);
    if (ar.currentline > 0)
      lua_pushfstring(L, "%d:", ar.currentline);
    if (*ar.namewhat != '\0')  /* is there a name? */
      lua_pushfstring(L, " in function " LUA_QS, ar.name);
    else {
      if (*ar.what == 'm')  /* main? */
        lua_pushfstring(L, " in main chunk");
      else if (*ar.what == 'C' || *ar.what == 't')
        lua_pushliteral(L, " ?");  /* C function or tail call */
      else
        lua_pushfstring(L, " in function <%s:%d>",
                        ar.short_src, ar.linedefined);
    }
    lua_concat(L, lua_gettop(L) - arg);  /* merge pieces for this frame */
  }
  lua_concat(L, lua_gettop(L) - arg);  /* final result string */
  return 1;
}
/* registration table for the debug library (name -> C function) */
static const luaL_Reg dblib[] = {
  {"debug", db_debug},
  {"getfenv", db_getfenv},
  {"gethook", db_gethook},
  {"getinfo", db_getinfo},
  {"getlocal", db_getlocal},
  {"getregistry", db_getregistry},
  {"getmetatable", db_getmetatable},
  {"getupvalue", db_getupvalue},
  {"setfenv", db_setfenv},
  {"sethook", db_sethook},
  {"setlocal", db_setlocal},
  {"setmetatable", db_setmetatable},
  {"setupvalue", db_setupvalue},
  {"traceback", db_errorfb},
  {NULL, NULL}  /* sentinel */
};
/*
** Library entry point: registers all debug functions in a global table
** named LUA_DBLIBNAME ("debug") and returns it.
*/
LUALIB_API int luaopen_debug (lua_State *L) {
  luaL_register(L, LUA_DBLIBNAME, dblib);
  return 1;
}
|
c
|
github
|
https://github.com/redis/redis
|
deps/lua/src/ldblib.c
|
#!/usr/bin/python
# used by count.sh
#
# Count LiquidHaskell annotations ('{-@ ... @-}' blocks) in the source file
# named by the first command-line argument, print summary statistics, and
# dump the type-specification and "other" annotation texts to side files.
#
# Fixes over the original: the builtin name `str` is no longer shadowed,
# file handles are closed via `with`, the deprecated `string` module is no
# longer used, and `print(...)` calls work under both Python 2 and 3.
import re
import sys


def _annotations(pattern, text):
    """Return every annotation span whose '{-@' opener matches *pattern*,
    extended through its closing '@-}' marker (inclusive)."""
    # NOTE(review): as in the original, a missing '@-}' terminator yields a
    # truncated slice (str.find returns -1) rather than raising an error.
    return [text[m.start():text.find('@-}', m.start()) + 3]
            for m in re.finditer(pattern, text)]


def _line_count(spans):
    """Total number of source lines covered by the given spans."""
    return sum(1 + s.count('\n') for s in spans)


fname = sys.argv[1]
with open(fname, 'r') as fh:
    source = fh.read()

# Partition the annotations: declarations (type/measure/data/...), qualifiers,
# and everything else, which is treated as plain type specifications.
other = _annotations('{-@ (type|measure|data|include|predicate|Decrease|Strict)', source)
qualifs = _annotations('{-@ qualif', source)
tyspecs = _annotations('{-@ (?!(type|measure|data|include|predicate|qualif|Decrease|Strict))', source)

print("Type specifications:\t\t count = %d \t lines = %d" % (len(tyspecs), _line_count(tyspecs)))
print("Qualifiers :\t\t count = %d \t lines = %d" % (len(qualifs), _line_count(qualifs)))
print("Other Annotations :\t\t count = %d \t lines = %d" % (len(other), _line_count(other)))

# Dump the collected annotation texts, flattening the source path into the
# output file names (e.g. "tyspec_src_Foo.hs_.txt").
with open('_'.join(["tyspec", fname.replace('/', '_'), ".txt"]), 'w') as ftyspec:
    ftyspec.write('\n'.join(tyspecs))
with open('_'.join(["other", fname.replace('/', '_'), ".txt"]), 'w') as fother:
    fother.write('\n'.join(other))
|
unknown
|
codeparrot/codeparrot-clean
| ||
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import django_pgjson.fields
class Migration(migrations.Migration):
    """Re-declare the ``attributes_values`` JSON column (with ``default={}``)
    on the issue, task and user-story custom-attribute value models."""
    dependencies = [
        ('custom_attributes', '0004_create_empty_customattributesvalues_for_existen_object'),
    ]
    operations = [
        # The three AlterField operations are identical except for the model
        # name: each re-declares the same JsonField on a different model.
        # NOTE(review): default={} is a shared mutable default; assumed
        # intentional here since it only mirrors the model field's state in
        # the migration graph -- confirm against the model definition.
        migrations.AlterField(
            model_name='issuecustomattributesvalues',
            name='attributes_values',
            field=django_pgjson.fields.JsonField(verbose_name='values', default={}),
            preserve_default=True,
        ),
        migrations.AlterField(
            model_name='taskcustomattributesvalues',
            name='attributes_values',
            field=django_pgjson.fields.JsonField(verbose_name='values', default={}),
            preserve_default=True,
        ),
        migrations.AlterField(
            model_name='userstorycustomattributesvalues',
            name='attributes_values',
            field=django_pgjson.fields.JsonField(verbose_name='values', default={}),
            preserve_default=True,
        ),
    ]
|
unknown
|
codeparrot/codeparrot-clean
| ||
---
mapped_pages:
- https://www.elastic.co/guide/en/elasticsearch/plugins/current/discovery-gce-usage-cloning.html
---
# Cloning your existing machine [discovery-gce-usage-cloning]
To build a cluster across many nodes, you can clone your configured instance onto new nodes — you won’t have to reinstall everything!
First create an image of your running instance and upload it to Google Cloud Storage:
```sh
# Create an image of your current instance
sudo /usr/bin/gcimagebundle -d /dev/sda -o /tmp/
# An image has been created in `/tmp` directory:
ls /tmp
e4686d7f5bf904a924ae0cfeb58d0827c6d5b966.image.tar.gz
# Upload your image to Google Cloud Storage:
# Create a bucket to hold your image, let's say `esimage`:
gsutil mb gs://esimage
# Copy your image to this bucket:
gsutil cp /tmp/e4686d7f5bf904a924ae0cfeb58d0827c6d5b966.image.tar.gz gs://esimage
# Then add your image to images collection:
gcloud compute images create elasticsearch-2-0-0 --source-uri gs://esimage/e4686d7f5bf904a924ae0cfeb58d0827c6d5b966.image.tar.gz
# If the previous command did not work for you, logout from your instance
# and launch the same command from your local machine.
```
## Start new instances [discovery-gce-usage-start-new-instances]
Now that you have an image, you can create as many instances as you need:
```sh
# Just change node name (here myesnode2)
gcloud compute instances create myesnode2 --image elasticsearch-2-0-0 --zone europe-west1-a
# If you want to provide all details directly, you can use:
gcloud compute instances create myesnode2 --image=elasticsearch-2-0-0 \
--zone europe-west1-a --machine-type f1-micro --scopes=compute-rw
```
## Remove an instance (aka shut it down) [discovery-gce-usage-remove-instance]
You can use [Google Cloud Console](https://cloud.google.com/console) or CLI to manage your instances:
```sh
# Stopping and removing instances
gcloud compute instances delete myesnode1 myesnode2 \
--zone=europe-west1-a
# Consider removing disk as well if you don't need them anymore
gcloud compute disks delete boot-myesnode1 boot-myesnode2 \
--zone=europe-west1-a
```
|
unknown
|
github
|
https://github.com/elastic/elasticsearch
|
docs/reference/elasticsearch-plugins/discovery-gce-usage-cloning.md
|
import re
import pytz
import datetime
from billy.scrape.events import EventScraper, Event
from openstates.utils import LXMLMixin
class WYEventScraper(EventScraper, LXMLMixin):
    """Scrape Wyoming legislative committee meeting events from the
    committee calendar pages on legisweb.state.wy.us.

    The calendar pages render each day's meetings as rows of an
    ``MsoNormalTable``; the parsing helpers below pick the time, committee,
    location, description and bill references out of those rows.
    """
    jurisdiction = 'wy'
    # Wyoming observes US Mountain time.
    _tz = pytz.timezone('US/Mountain')
    def normalize_time(self, time_string):
        """Coerce a free-form meeting-time string into 'HH:MM am/pm'.

        Any string that cannot be normalized falls back to '12:00 am'.
        """
        time_string = time_string.lower()
        # phrases like "upon adjournment" carry no clock time
        if re.search(r'(upon|after)(\?)? adjournment', time_string):
            time_string = '12:00 am'
        elif re.search(r'(noon (adjournment|recess)|afternoon)', time_string):
            time_string = '12:00 pm'
        # "a.m."/"p.m." -> "am"/"pm"
        if re.search(r'[ap]\.m\.', time_string):
            ap = re.search(r'([ap])\.m\.', time_string).group(1)
            time_string = time_string.replace(ap + '.m.', ap + 'm')
        # insert the missing space in forms like "9:00am"
        if re.search(r'[0-9]{1,2}:[0-9]{1,2}[ap]m', time_string):
            hour_minutes, meridiem = re.search(
                r'([0-9]{1,2}:[0-9]{1,2})([ap]m)', time_string).groups()
            time_string = hour_minutes + ' ' + meridiem
        # drop trailing text after a leading "HH:MM am/pm"
        if re.search(
            r'^[0-9]{1,2}:[0-9]{1,2} [ap]m', time_string
        ) and not re.search(r'^[0-9]{1,2}:[0-9]{1,2} [ap]m$', time_string):
            time_string = re.search(
                r'^([0-9]{1,2}:[0-9]{1,2} [ap]m)', time_string).group(1)
        if not re.search(r'^[0-9]{1,2}:[0-9]{1,2} [ap]m$', time_string):
            # if at this point it doesn't match our format return 12:00 am
            time_string = '12:00 am'
        return time_string
    def get_meeting_time(self, meeting_data):
        """Normalized meeting time from the first paragraph of the first row."""
        meeting_time = meeting_data[0].xpath(
            './/p[@class="MsoNormal"]')[0].text_content().strip()
        meeting_time = self.normalize_time(meeting_time)
        return meeting_time
    def get_committee(self, meeting_data):
        """Committee name from the second paragraph of the first row, with any
        leading numeric prefix (e.g. "02-") stripped; None when blank."""
        committee = meeting_data[0].xpath(
            './/p[@class="MsoNormal"]')[1].text_content().strip()
        if committee == '':
            committee = None
        else:
            committee = re.sub(r'^[0-9]+-','',committee)
            committee = self.clean_string(committee)
        return committee
    def get_location(self, meeting_data):
        """Meeting room from the last paragraph of the first row, or None."""
        tr = meeting_data[0].xpath('.//p[@class="MsoNormal"]')
        room = tr[len(tr)-1].text_content().strip()
        room = self.clean_string(room)
        if room == '':
            room = None
        return room
    def get_meeting_description(self, meeting_data):
        """Concatenated description text from the meeting's detail rows
        (the header row is skipped when detail rows exist)."""
        descriptions = ''
        if len(meeting_data) > 1:
            start_at = 1
        else:
            start_at = 0
        for tr in meeting_data[start_at:]:
            # description lives in the second-to-last cell of each row
            description = tr[len(tr)-2].text_content().strip()
            descriptions += ' ' + description
        descriptions = self.clean_string(descriptions).strip()
        return descriptions
    def get_bills(self, meeting_data):
        """List of {bill_id, bill_description, bill_url} dicts for every bill
        link found in the meeting rows."""
        bill_data = []
        for tr in meeting_data:
            bills = tr.xpath('.//a[contains(@href, "/Bills/")]')
            if bills:
                for bill in bills:
                    bill_id = bill.text_content().strip()
                    bill_description = self.clean_string(
                        tr.xpath('.//td[3]/p')[0].text_content().strip())
                    bill_url = bill.attrib['href'].strip() #pdf file
                    # dont include bad HTML links for bills. thankfully
                    # they're duplicates and already listed properly
                    if 'href' not in bill_url and '</a>' not in bill_url:
                        bill_data.append({
                            'bill_id': bill_id,
                            'bill_description' : bill_description,
                            'bill_url' : bill_url
                        })
        return bill_data
    def clean_string(self, my_string):
        """Strip non-ASCII characters and collapse whitespace/newlines."""
        my_string = my_string.encode('ascii','ignore')
        my_string = re.sub(r'(\n|\r\n)',' ', my_string)
        my_string = re.sub(r'\s{2,}',' ', my_string)
        my_string = my_string.strip()
        return my_string
    def is_row_a_new_meeting(self, row):
        """True when the table row looks like a meeting header: three cells
        (time, committee, location), all non-empty."""
        if len(row) == 3:
            td1 = row.xpath('.//td[1]/p[@class="MsoNormal"]')
            td2 = row.xpath('.//td[2]/p[@class="MsoNormal"]')
            td3 = row.xpath('.//td[3]/p[@class="MsoNormal"]')
            if len(td2) == 0:
                # some headers render the committee as an <h1> instead
                td2 = row.xpath('.//td[2]/h1')
            if len(td1) == 0 or len(td2) == 0:
                return False
            if (self.clean_string(td1[0].text_content()) == ''
                    or self.clean_string(td2[0].text_content()) == ''
                    or self.clean_string(td3[0].text_content()) == ''):
                return False
        else:
            return False
        return True
    def scrape(self, chamber, session):
        """Scrape all committee meetings of `chamber` for `session` and save
        one Event per meeting (with related bills and host committee)."""
        if chamber == 'other':
            return
        calendar_url = ("http://legisweb.state.wy.us/%s/Calendar/"
                        "CalendarMenu/CommitteeMenu.aspx" % str(session))
        page = self.lxmlize(calendar_url)
        rows = page.xpath('//table[@id="ctl00_cphContent_gvCalendars"]/tr')
        for i,row in enumerate(rows):
            # ASP.NET numbers the row controls starting at ctl02
            row_ident = '%02d' % (i + 2)
            date_xpath = ('.//span[@id="ctl00_cphContent_gv'
                          'Calendars_ctl%s_lblDate"]' % str(row_ident))
            date_string = row.xpath(date_xpath)[0].text_content()
            # 'S' or 'H' selects the per-chamber calendar link
            chamber_char = self.metadata['chambers'][chamber]['name'][0].upper()
            meeting_xpath = ('.//a[@id="ctl00_cphContent_gv'
                             'Calendars_ctl%s_hl%scallink"]' % (
                                 str(row_ident), chamber_char
                             ))
            meeting_url = row.xpath(meeting_xpath)
            if (len(meeting_url) == 1 and
                    meeting_url[0].text_content().strip() != ''):
                try:
                    meeting_url = meeting_url[0].attrib['href']
                except KeyError:
                    self.warning(
                        "Alleged meeting date has no URL: " +
                        meeting_url[0].text_content().strip()
                    )
                    continue
                meeting_page = self.lxmlize(meeting_url)
                meetings = meeting_page.xpath(
                    './/table[@class="MsoNormalTable"]/tr')
                meeting_idents = []
                meeting_ident = 0
                # breaking the meetings into arrays (meeting_data) for
                # processing. meeting_ident is the first row of the meeting
                # (time, committee, location)
                for meeting in meetings:
                    if self.is_row_a_new_meeting(meeting):
                        meeting_idents.append(meeting_ident)
                    meeting_ident += 1
                for i,meeting_ident in enumerate(meeting_idents):
                    # the last meeting runs to the end of the table
                    if len(meeting_idents) == 1 or i + 1 == len(meeting_idents):
                        ident_start, ident_end = [meeting_ident, 0]
                        meeting_data = meetings[ident_start:]
                    else:
                        ident_start, ident_end = [
                            meeting_ident, meeting_idents[i+1] - 1
                        ]
                        if ident_end - ident_start == 1:
                            ident_end = ident_start + 2
                        meeting_data = meetings[ident_start:ident_end]
                    committee = self.get_committee(meeting_data)
                    meeting_time = self.get_meeting_time(meeting_data)
                    meeting_date_time = datetime.datetime.strptime(
                        date_string + ' ' + meeting_time, '%m/%d/%Y %I:%M %p')
                    meeting_date_time = self._tz.localize(meeting_date_time)
                    location = self.get_location(meeting_data)
                    description = self.get_meeting_description(meeting_data)
                    bills = self.get_bills(meeting_data)
                    if description == '':
                        # fall back to the committee name as description
                        description = committee
                    event = Event(
                        session,
                        meeting_date_time,
                        'committee:meeting',
                        description,
                        location
                    )
                    event.add_source(meeting_url)
                    for bill in bills:
                        if bill['bill_description'] == '':
                            bill['bill_description'] = committee
                        event.add_related_bill(
                            bill_id=bill['bill_id'],
                            description=bill['bill_description'],
                            type='consideration'
                        )
                        event.add_document(
                            name=bill['bill_id'],
                            url=bill['bill_url'],
                            type='bill',
                            mimetype='application/pdf'
                        )
                    event.add_participant(
                        type='host',
                        participant=committee,
                        participant_type='committee',
                        chamber=chamber
                    )
                    self.save_event(event)
|
unknown
|
codeparrot/codeparrot-clean
| ||
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.clients;
import org.apache.kafka.common.errors.AuthenticationException;
import org.apache.kafka.common.errors.UnsupportedVersionException;
import org.apache.kafka.common.requests.AbstractResponse;
import org.apache.kafka.common.requests.RequestHeader;
/**
* A response from the server. Contains both the body of the response as well as the correlated request
* metadata that was originally sent.
*/
public class ClientResponse {

    private final RequestHeader requestHeader;
    private final RequestCompletionHandler callback;
    private final String destination;
    private final long receivedTimeMs;
    private final long latencyMs;
    private final boolean disconnected;
    private final boolean timedOut;
    private final UnsupportedVersionException versionMismatch;
    private final AuthenticationException authenticationException;
    private final AbstractResponse responseBody;

    /**
     * Convenience constructor for responses that did not time out.
     *
     * @param requestHeader The header of the corresponding request
     * @param callback The callback to be invoked
     * @param destination The node the corresponding request was sent to
     * @param createdTimeMs The unix timestamp when the corresponding request was created
     * @param receivedTimeMs The unix timestamp when this response was received
     * @param disconnected Whether the client disconnected before fully reading a response
     * @param versionMismatch Whether there was a version mismatch that prevented sending the request.
     * @param responseBody The response contents (or null) if we disconnected, no response was expected,
     *                     or if there was a version mismatch.
     */
    public ClientResponse(RequestHeader requestHeader,
                          RequestCompletionHandler callback,
                          String destination,
                          long createdTimeMs,
                          long receivedTimeMs,
                          boolean disconnected,
                          UnsupportedVersionException versionMismatch,
                          AuthenticationException authenticationException,
                          AbstractResponse responseBody) {
        // delegate with timedOut = false
        this(requestHeader, callback, destination, createdTimeMs, receivedTimeMs,
             disconnected, false, versionMismatch, authenticationException, responseBody);
    }

    /**
     * @param requestHeader The header of the corresponding request
     * @param callback The callback to be invoked
     * @param destination The node the corresponding request was sent to
     * @param createdTimeMs The unix timestamp when the corresponding request was created
     * @param receivedTimeMs The unix timestamp when this response was received
     * @param disconnected Whether the client disconnected before fully reading a response
     * @param timedOut Whether the client was disconnected because of a timeout; when setting this
     *                 to <code>true</code>, <code>disconnected</code> must be <code>true</code>
     *                 or an {@link IllegalStateException} will be thrown
     * @param versionMismatch Whether there was a version mismatch that prevented sending the request.
     * @param responseBody The response contents (or null) if we disconnected, no response was expected,
     *                     or if there was a version mismatch.
     */
    public ClientResponse(RequestHeader requestHeader,
                          RequestCompletionHandler callback,
                          String destination,
                          long createdTimeMs,
                          long receivedTimeMs,
                          boolean disconnected,
                          boolean timedOut,
                          UnsupportedVersionException versionMismatch,
                          AuthenticationException authenticationException,
                          AbstractResponse responseBody) {
        // a timeout implies a disconnection; reject the inconsistent combination
        if (timedOut && !disconnected)
            throw new IllegalStateException("The client response can't be in the state of connected, yet timed out");
        this.requestHeader = requestHeader;
        this.callback = callback;
        this.destination = destination;
        this.receivedTimeMs = receivedTimeMs;
        this.latencyMs = receivedTimeMs - createdTimeMs;
        this.disconnected = disconnected;
        this.timedOut = timedOut;
        this.versionMismatch = versionMismatch;
        this.authenticationException = authenticationException;
        this.responseBody = responseBody;
    }

    /** The unix timestamp when this response was received. */
    public long receivedTimeMs() {
        return receivedTimeMs;
    }

    /** Whether the client disconnected before fully reading the response. */
    public boolean wasDisconnected() {
        return disconnected;
    }

    /** Whether the disconnection was caused by a timeout. */
    public boolean wasTimedOut() {
        return timedOut;
    }

    /** The version mismatch that prevented sending the request, if any. */
    public UnsupportedVersionException versionMismatch() {
        return versionMismatch;
    }

    /** The authentication failure associated with this response, if any. */
    public AuthenticationException authenticationException() {
        return authenticationException;
    }

    /** The header of the request this response corresponds to. */
    public RequestHeader requestHeader() {
        return requestHeader;
    }

    /** The node the corresponding request was sent to. */
    public String destination() {
        return destination;
    }

    /** The parsed response body, or null (see constructor docs). */
    public AbstractResponse responseBody() {
        return responseBody;
    }

    /** Whether a response body is present. */
    public boolean hasResponse() {
        return responseBody != null;
    }

    /** Elapsed time between request creation and response receipt. */
    public long requestLatencyMs() {
        return latencyMs;
    }

    /** Invoke the completion callback, if one was registered. */
    public void onComplete() {
        if (callback != null)
            callback.onComplete(this);
    }

    @Override
    public String toString() {
        StringBuilder sb = new StringBuilder("ClientResponse(");
        sb.append("receivedTimeMs=").append(receivedTimeMs);
        sb.append(", latencyMs=").append(latencyMs);
        sb.append(", disconnected=").append(disconnected);
        sb.append(", timedOut=").append(timedOut);
        sb.append(", requestHeader=").append(requestHeader);
        sb.append(", responseBody=").append(responseBody);
        sb.append(')');
        return sb.toString();
    }
}
|
java
|
github
|
https://github.com/apache/kafka
|
clients/src/main/java/org/apache/kafka/clients/ClientResponse.java
|
/*
* Copyright (C) 2007 The Guava Authors
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
* in compliance with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License
* is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
* or implied. See the License for the specific language governing permissions and limitations under
* the License.
*/
package com.google.common.util.concurrent;
import static com.google.common.base.Preconditions.checkNotNull;
import com.google.common.annotations.GwtIncompatible;
import com.google.errorprone.annotations.concurrent.GuardedBy;
import java.util.concurrent.Executor;
import java.util.logging.Level;
import org.jspecify.annotations.Nullable;
/**
* A support class for {@code ListenableFuture} implementations to manage their listeners. An
* instance contains a list of listeners, each with an associated {@code Executor}, and guarantees
* that every {@code Runnable} that is {@linkplain #add added} will be executed after {@link
* #execute()} is called. Any {@code Runnable} added after the call to {@code execute} is still
* guaranteed to execute. There is no guarantee, however, that listeners will be executed in the
* order that they are added.
*
* <p>Exceptions thrown by a listener will be propagated up to the executor. Any exception thrown
* during {@code Executor.execute} (e.g., a {@code RejectedExecutionException} or an exception
* thrown by {@linkplain MoreExecutors#directExecutor direct execution}) will be caught and logged.
*
* @author Nishant Thakkar
* @author Sven Mawson
* @since 1.0
*/
@GwtIncompatible
public final class ExecutionList {
  /** Logger to log exceptions caught when running runnables. */
  private static final LazyLogger log = new LazyLogger(ExecutionList.class);

  // Guards both mutable fields below ('runnables' and 'executed').
  private final Object lock = new Object();

  /**
   * The runnable, executor pairs to execute. This acts as a stack threaded through the {@link
   * RunnableExecutorPair#next} field.
   */
  @GuardedBy("lock")
  private @Nullable RunnableExecutorPair runnables;

  // True once execute() has run; from then on, add() executes listeners immediately.
  @GuardedBy("lock")
  private boolean executed;

  /** Creates a new, empty {@link ExecutionList}. */
  public ExecutionList() {}

  /**
   * Adds the {@code Runnable} and accompanying {@code Executor} to the list of listeners to
   * execute. If execution has already begun, the listener is executed immediately.
   *
   * <p>When selecting an executor, note that {@code directExecutor} is dangerous in some cases. See
   * the discussion in the {@link ListenableFuture#addListener ListenableFuture.addListener}
   * documentation.
   */
  public void add(Runnable runnable, Executor executor) {
    // Fail fast on a null. We throw NPE here because the contract of Executor states that it throws
    // NPE on null listener, so we propagate that contract up into the add method as well.
    checkNotNull(runnable, "Runnable was null.");
    checkNotNull(executor, "Executor was null.");

    // Lock while we check state. We must maintain the lock while adding the new pair so that
    // another thread can't run the list out from under us. We only add to the list if we have not
    // yet started execution.
    synchronized (lock) {
      if (!executed) {
        // Push onto the stack; execute() reverses to restore insertion order.
        runnables = new RunnableExecutorPair(runnable, executor, runnables);
        return;
      }
    }
    // Execute the runnable immediately. Because of scheduling this may end up getting called before
    // some of the previously added runnables, but we're OK with that. If we want to change the
    // contract to guarantee ordering among runnables we'd have to modify the logic here to allow
    // it.
    executeListener(runnable, executor);
  }

  /**
   * Runs this execution list, executing all existing pairs in the order they were added. However,
   * note that listeners added after this point may be executed before those previously added, and
   * note that the execution order of all listeners is ultimately chosen by the implementations of
   * the supplied executors.
   *
   * <p>This method is idempotent. Calling it several times in parallel is semantically equivalent
   * to calling it exactly once.
   *
   * @since 10.0 (present in 1.0 as {@code run})
   */
  public void execute() {
    // Lock while we update our state so the add method above will finish adding any listeners
    // before we start to run them.
    RunnableExecutorPair list;
    synchronized (lock) {
      if (executed) {
        return;
      }
      executed = true;
      list = runnables;
      runnables = null; // allow GC to free listeners even if this stays around for a while.
    }
    // If we succeeded then list holds all the runnables we to execute. The pairs in the stack are
    // in the opposite order from how they were added so we need to reverse the list to fulfill our
    // contract.
    // This is somewhat annoying, but turns out to be very fast in practice. Alternatively, we could
    // drop the contract on the method that enforces this queue like behavior since depending on it
    // is likely to be a bug anyway.

    // N.B. All writes to the list and the next pointers must have happened before the above
    // synchronized block, so we can iterate the list without the lock held here.
    RunnableExecutorPair reversedList = null;
    while (list != null) {
      RunnableExecutorPair tmp = list;
      list = list.next;
      tmp.next = reversedList;
      reversedList = tmp;
    }
    while (reversedList != null) {
      executeListener(reversedList.runnable, reversedList.executor);
      reversedList = reversedList.next;
    }
  }

  /**
   * Submits the given runnable to the given {@link Executor} catching and logging all {@linkplain
   * RuntimeException runtime exceptions} thrown by the executor.
   */
  @SuppressWarnings("CatchingUnchecked") // sneaky checked exception
  private static void executeListener(Runnable runnable, Executor executor) {
    try {
      executor.execute(runnable);
    } catch (Exception e) { // sneaky checked exception
      // Log it and keep going -- bad runnable and/or executor. Don't punish the other runnables if
      // we're given a bad one. We only catch Exception because we want Errors to propagate up.
      log.get()
          .log(
              Level.SEVERE,
              "RuntimeException while executing runnable "
                  + runnable
                  + " with executor "
                  + executor,
              e);
    }
  }

  /** Singly-linked stack node pairing a listener with its executor. */
  private static final class RunnableExecutorPair {
    final Runnable runnable;
    final Executor executor;
    @Nullable RunnableExecutorPair next;

    RunnableExecutorPair(
        Runnable runnable, Executor executor, @Nullable RunnableExecutorPair next) {
      this.runnable = runnable;
      this.executor = executor;
      this.next = next;
    }
  }
}
|
java
|
github
|
https://github.com/google/guava
|
android/guava/src/com/google/common/util/concurrent/ExecutionList.java
|
/*
* Copyright (C) 2012 The Guava Authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.google.common.hash;
import com.google.caliper.BeforeExperiment;
import com.google.caliper.Benchmark;
import com.google.caliper.Param;
import java.util.Random;
import org.jspecify.annotations.NullUnmarked;
/**
* Benchmarks for comparing the various {@link HashFunction functions} that we provide.
*
* <p>Parameters for the benchmark are:
*
* <ul>
* <li>size: The length of the byte array to hash.
* <li>hashFunctionEnum: The {@link HashFunction} to use for hashing.
* </ul>
*
* @author Kurt Alfred Kluever
*/
@NullUnmarked
public class HashFunctionBenchmark {
  // Fixed-seed RNG so every benchmark run hashes identical input bytes.
  private static final Random prng = new Random(42);

  @Param({"10", "1000", "100000", "1000000"})
  private int size;

  @Param HashFunctionEnum hashFunctionEnum;

  // Input buffer of 'size' random bytes, filled once before each experiment.
  private byte[] data;

  @BeforeExperiment
  void setUp() {
    data = new byte[size];
    prng.nextBytes(data);
  }

  /** Measures the incremental {@code Hasher} API (newHasher/putBytes/hash). */
  @Benchmark
  int hasher(int reps) {
    HashFunction fn = hashFunctionEnum.getHashFunction();
    int acc = 37;
    for (int rep = 0; rep < reps; rep++) {
      acc ^= fn.newHasher().putBytes(data).hash().asBytes()[0];
    }
    return acc;
  }

  /** Measures the one-shot {@code hashBytes} entry point over the whole buffer. */
  @Benchmark
  int hashFunction(int reps) {
    HashFunction fn = hashFunctionEnum.getHashFunction();
    int acc = 37;
    for (int rep = 0; rep < reps; rep++) {
      acc ^= fn.hashBytes(data).asBytes()[0];
    }
    return acc;
  }

  /** Measures {@code hashBytes} with a non-zero offset into the buffer. */
  @Benchmark
  int hashFunctionWithOffset(int reps) {
    HashFunction fn = hashFunctionEnum.getHashFunction();
    int acc = 37;
    for (int rep = 0; rep < reps; rep++) {
      acc ^= fn.hashBytes(data, 1, data.length - 1).asBytes()[0];
    }
    return acc;
  }
}
|
java
|
github
|
https://github.com/google/guava
|
android/guava-tests/benchmark/com/google/common/hash/HashFunctionBenchmark.java
|
# Copyright (C) 2008, One Laptop Per Child
# Copyright (C) 2009, Tomeu Vizoso
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
from gettext import gettext as _
from gettext import ngettext
import locale
import logging
from gi.repository import GObject
from gi.repository import Gtk
from sugar3.graphics import style
from sugar3.graphics.icon import Icon, CellRendererIcon
from jarabe.controlpanel.sectionview import SectionView
from model import UpdateModel
_DEBUG_VIEW_ALL = True
class ActivityUpdater(SectionView):
    """Control-panel section that checks for, lists and installs activity
    updates reported by an ``UpdateModel``.

    The center of the section swaps between an ``UpdateBox`` (selection
    list) and a ``ProgressPane`` (busy indicator) depending on model state.
    """

    def __init__(self, model, alerts):
        SectionView.__init__(self)
        # NOTE(review): the 'model' and 'alerts' arguments are ignored; a
        # fresh UpdateModel is created instead -- confirm this is intentional.
        self._model = UpdateModel()
        self._model.connect('progress', self.__progress_cb)
        self.set_spacing(style.DEFAULT_SPACING)
        self.set_border_width(style.DEFAULT_SPACING * 2)
        # Headline label ("Checking...", "N updates available", ...).
        self._top_label = Gtk.Label()
        self._top_label.set_line_wrap(True)
        self._top_label.set_justify(Gtk.Justification.LEFT)
        self._top_label.props.xalign = 0
        self.pack_start(self._top_label, False, True, 0)
        self._top_label.show()
        separator = Gtk.HSeparator()
        self.pack_start(separator, False, True, 0)
        separator.show()
        bottom_label = Gtk.Label()
        bottom_label.set_line_wrap(True)
        bottom_label.set_justify(Gtk.Justification.LEFT)
        bottom_label.props.xalign = 0
        bottom_label.set_markup(
            _('Software updates correct errors, eliminate security ' \
              'vulnerabilities, and provide new features.'))
        self.pack_start(bottom_label, False, True, 0)
        bottom_label.show()
        # Exactly one of these occupies the center area at a time.
        self._update_box = None
        self._progress_pane = None
        # Kick off an update check immediately on section entry.
        self._refresh()

    def _switch_to_update_box(self):
        """Show the update-selection list, tearing down the progress pane."""
        if self._update_box in self.get_children():
            return
        if self._progress_pane in self.get_children():
            self.remove(self._progress_pane)
            self._progress_pane = None
        if self._update_box is None:
            self._update_box = UpdateBox(self._model)
            self._update_box.refresh_button.connect('clicked',
                    self.__refresh_button_clicked_cb)
            self._update_box.install_button.connect('clicked',
                    self.__install_button_clicked_cb)
        self.pack_start(self._update_box, expand=True, fill=True, padding=0)
        self._update_box.show()

    def _switch_to_progress_pane(self):
        """Show the progress pane, tearing down the update-selection list."""
        if self._progress_pane in self.get_children():
            return
        if self._update_box in self.get_children():
            self.remove(self._update_box)
            self._update_box = None
        if self._progress_pane is None:
            self._progress_pane = ProgressPane()
            self._progress_pane.cancel_button.connect('clicked',
                    self.__cancel_button_clicked_cb)
        self.pack_start(self._progress_pane, expand=True, fill=False, padding=0)
        self._progress_pane.show()

    def _clear_center(self):
        """Remove whichever center widget is currently shown."""
        if self._progress_pane in self.get_children():
            self.remove(self._progress_pane)
            self._progress_pane = None
        if self._update_box in self.get_children():
            self.remove(self._update_box)
            self._update_box = None

    def __progress_cb(self, model, action, bundle_name, current, total):
        """Model 'progress' handler; current == total signals completion."""
        if current == total and action == UpdateModel.ACTION_CHECKING:
            self._finished_checking()
            return
        elif current == total:
            self._finished_updating(int(current))
            return
        if action == UpdateModel.ACTION_CHECKING:
            message = _('Checking %s...') % bundle_name
        elif action == UpdateModel.ACTION_DOWNLOADING:
            message = _('Downloading %s...') % bundle_name
        elif action == UpdateModel.ACTION_UPDATING:
            message = _('Updating %s...') % bundle_name
        self._switch_to_progress_pane()
        self._progress_pane.set_message(message)
        self._progress_pane.set_progress(current / float(total))

    def _finished_checking(self):
        """Update the headline and center widget after an update check."""
        logging.debug('ActivityUpdater._finished_checking')
        available_updates = len(self._model.updates)
        if not available_updates:
            top_message = _('Your software is up-to-date')
        else:
            top_message = ngettext('You can install %s update',
                                   'You can install %s updates',
                                   available_updates)
            top_message = top_message % available_updates
            top_message = GObject.markup_escape_text(top_message)
        self._top_label.set_markup('<big>%s</big>' % top_message)
        if not available_updates:
            self._clear_center()
        else:
            self._switch_to_update_box()
            self._update_box.refresh()

    def __refresh_button_clicked_cb(self, button):
        self._refresh()

    def _refresh(self):
        """Start an asynchronous check for available updates."""
        top_message = _('Checking for updates...')
        self._top_label.set_markup('<big>%s</big>' % top_message)
        self._model.check_updates()

    def __install_button_clicked_cb(self, button):
        text = '<big>%s</big>' % _('Installing updates...')
        self._top_label.set_markup(text)
        self._model.update(self._update_box.get_bundles_to_update())

    def __cancel_button_clicked_cb(self, button):
        self._model.cancel()

    def _finished_updating(self, installed_updates):
        """Update the headline after installs complete and clear the center."""
        logging.debug('ActivityUpdater._finished_updating')
        top_message = ngettext('%s update was installed',
                               '%s updates were installed', installed_updates)
        top_message = top_message % installed_updates
        top_message = GObject.markup_escape_text(top_message)
        self._top_label.set_markup('<big>%s</big>' % top_message)
        self._clear_center()

    def undo(self):
        # Called by the control panel framework when the user backs out.
        self._model.cancel()
class ProgressPane(Gtk.VBox):
    """Container which replaces the `ActivityPane` during refresh or
    install."""

    def __init__(self):
        Gtk.VBox.__init__(self)
        self.set_spacing(style.DEFAULT_PADDING)
        self.set_border_width(style.DEFAULT_SPACING * 2)
        # Progress bar on top, status message below, cancel button centered.
        self._progress = Gtk.ProgressBar()
        self.pack_start(self._progress, True, True, 0)
        self._progress.show()
        self._label = Gtk.Label()
        self._label.set_line_wrap(True)
        self._label.set_property('xalign', 0.5)
        self._label.modify_fg(Gtk.StateType.NORMAL,
                              style.COLOR_BUTTON_GREY.get_gdk_color())
        self.pack_start(self._label, True, True, 0)
        self._label.show()
        # Alignment keeps the cancel button at its natural size, centered.
        alignment_box = Gtk.Alignment.new(xalign=0.5, yalign=0.5,
                                          xscale=0, yscale=0)
        self.pack_start(alignment_box, True, True, 0)
        alignment_box.show()
        # Public: ActivityUpdater connects to this button's 'clicked' signal.
        self.cancel_button = Gtk.Button(stock=Gtk.STOCK_CANCEL)
        alignment_box.add(self.cancel_button)
        self.cancel_button.show()

    def set_message(self, message):
        """Set the status text shown under the progress bar."""
        self._label.set_text(message)

    def set_progress(self, fraction):
        """Set the bar's fill level; *fraction* is in [0.0, 1.0]."""
        self._progress.props.fraction = fraction
class UpdateBox(Gtk.VBox):
    """Scrollable list of available updates plus the size/refresh/install
    bottom bar."""

    def __init__(self, model):
        Gtk.VBox.__init__(self)
        self._model = model
        self.set_spacing(style.DEFAULT_PADDING)
        scrolled_window = Gtk.ScrolledWindow()
        scrolled_window.set_policy(Gtk.PolicyType.AUTOMATIC, Gtk.PolicyType.AUTOMATIC)
        self.pack_start(scrolled_window, True, True, 0)
        scrolled_window.show()
        self._update_list = UpdateList(model)
        # Re-compute totals whenever a row's checkbox is toggled.
        self._update_list.props.model.connect('row-changed',
                                              self.__row_changed_cb)
        scrolled_window.add(self._update_list)
        self._update_list.show()
        bottom_box = Gtk.HBox()
        bottom_box.set_spacing(style.DEFAULT_SPACING)
        self.pack_start(bottom_box, False, True, 0)
        bottom_box.show()
        self._size_label = Gtk.Label()
        self._size_label.props.xalign = 0
        self._size_label.set_justify(Gtk.Justification.LEFT)
        bottom_box.pack_start(self._size_label, True, True, 0)
        self._size_label.show()
        # Public buttons: ActivityUpdater connects to their 'clicked' signals.
        self.refresh_button = Gtk.Button(stock=Gtk.STOCK_REFRESH)
        bottom_box.pack_start(self.refresh_button, False, True, 0)
        self.refresh_button.show()
        self.install_button = Gtk.Button(_('Install selected'))
        self.install_button.props.image = Icon(icon_name='emblem-downloads',
                                               icon_size=Gtk.IconSize.BUTTON)
        bottom_box.pack_start(self.install_button, False, True, 0)
        self.install_button.show()
        self._update_total_size_label()

    def refresh(self):
        self._update_list.refresh()

    def __row_changed_cb(self, list_model, path, iterator):
        self._update_total_size_label()
        self._update_install_button()

    def _update_total_size_label(self):
        """Show the total download size of all selected updates."""
        total_size = 0
        for row in self._update_list.props.model:
            if row[UpdateListModel.SELECTED]:
                total_size += row[UpdateListModel.SIZE]
        markup = _('Download size: %s') % _format_size(total_size)
        self._size_label.set_markup(markup)

    def _update_install_button(self):
        """Enable the install button only while at least one row is selected."""
        for row in self._update_list.props.model:
            if row[UpdateListModel.SELECTED]:
                self.install_button.props.sensitive = True
                return
        self.install_button.props.sensitive = False

    def get_bundles_to_update(self):
        """Return the bundle ids of every selected row."""
        bundles_to_update = []
        for row in self._update_list.props.model:
            if row[UpdateListModel.SELECTED]:
                bundles_to_update.append(row[UpdateListModel.BUNDLE_ID])
        return bundles_to_update
class UpdateList(Gtk.TreeView):
    """Tree view with three columns: selection checkbox, activity icon,
    and markup description."""

    def __init__(self, model):
        list_model = UpdateListModel(model)
        Gtk.TreeView.__init__(self, list_model)
        self.set_reorderable(False)
        self.set_enable_search(False)
        self.set_headers_visible(False)
        # Column 1: checkbox bound to the SELECTED model column.
        toggle_renderer = Gtk.CellRendererToggle()
        toggle_renderer.props.activatable = True
        toggle_renderer.props.xpad = style.DEFAULT_PADDING
        toggle_renderer.props.indicator_size = style.zoom(26)
        toggle_renderer.connect('toggled', self.__toggled_cb)
        toggle_column = Gtk.TreeViewColumn()
        toggle_column.pack_start(toggle_renderer, True)
        toggle_column.add_attribute(toggle_renderer, 'active',
                                    UpdateListModel.SELECTED)
        self.append_column(toggle_column)
        # Column 2: activity icon loaded from ICON_FILE_NAME.
        icon_renderer = CellRendererIcon(self)
        icon_renderer.props.width = style.STANDARD_ICON_SIZE
        icon_renderer.props.height = style.STANDARD_ICON_SIZE
        icon_renderer.props.size = style.STANDARD_ICON_SIZE
        icon_renderer.props.xpad = style.DEFAULT_PADDING
        icon_renderer.props.ypad = style.DEFAULT_PADDING
        icon_renderer.props.stroke_color = style.COLOR_TOOLBAR_GREY.get_svg()
        icon_renderer.props.fill_color = style.COLOR_TRANSPARENT.get_svg()
        icon_column = Gtk.TreeViewColumn()
        icon_column.pack_start(icon_renderer, True)
        icon_column.add_attribute(icon_renderer, 'file-name',
                                  UpdateListModel.ICON_FILE_NAME)
        self.append_column(icon_column)
        # Column 3: name + version details as Pango markup.
        text_renderer = Gtk.CellRendererText()
        description_column = Gtk.TreeViewColumn()
        description_column.pack_start(text_renderer, True)
        description_column.add_attribute(text_renderer, 'markup',
                                        UpdateListModel.DESCRIPTION)
        self.append_column(description_column)

    def __toggled_cb(self, cell_renderer, path):
        # Flip the checkbox state; fires 'row-changed' on the model.
        row = self.props.model[path]
        row[UpdateListModel.SELECTED] = not row[UpdateListModel.SELECTED]

    def refresh(self):
        pass
class UpdateListModel(Gtk.ListStore):
    """List store backing ``UpdateList``: one row per available update,
    every row pre-selected."""

    # Column indices (must match the column types passed to ListStore).
    BUNDLE_ID = 0
    SELECTED = 1
    ICON_FILE_NAME = 2
    DESCRIPTION = 3
    SIZE = 4

    def __init__(self, model):
        Gtk.ListStore.__init__(self, str, bool, str, str, int)
        for update in model.updates:
            bundle = update.bundle
            details = _('From version %(current)s to %(new)s (Size: %(size)s)')
            details = details % \
                    {'current': bundle.get_activity_version(),
                     'new': update.version,
                     'size': _format_size(update.size)}
            description = '<b>%s</b>\n%s' % (bundle.get_name(), details)
            # Row values in column-index order.
            self.append([bundle.get_bundle_id(), True, bundle.get_icon(),
                         description, update.size])
def _format_size(size):
"""Convert a given size in bytes to a nicer better readable unit"""
if size == 0:
# TRANS: download size is 0
return _('None')
elif size < 1024:
# TRANS: download size of very small updates
return _('1 KB')
elif size < 1024 * 1024:
# TRANS: download size of small updates, e.g. '250 KB'
return locale.format_string(_('%.0f KB'), size / 1024.0)
else:
# TRANS: download size of updates, e.g. '2.3 MB'
return locale.format_string(_('%.1f MB'), size / 1024.0 / 1024)
|
unknown
|
codeparrot/codeparrot-clean
| ||
/**
* Implementation package for JSR-107 (javax.cache aka "JCache") based caches.
* Provides a {@link org.springframework.cache.CacheManager CacheManager}
* and {@link org.springframework.cache.Cache Cache} implementation for
* use in a Spring context, using a JSR-107 compliant cache provider.
*/
@NullMarked
package org.springframework.cache.jcache;
import org.jspecify.annotations.NullMarked;
|
java
|
github
|
https://github.com/spring-projects/spring-framework
|
spring-context-support/src/main/java/org/springframework/cache/jcache/package-info.java
|
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package options
import (
"github.com/spf13/pflag"
garbagecollectorconfig "k8s.io/kubernetes/pkg/controller/garbagecollector/config"
)
// GarbageCollectorControllerOptions holds the GarbageCollectorController options.
// The embedded configuration is shared with (and mutated via) AddFlags/ApplyTo.
type GarbageCollectorControllerOptions struct {
	*garbagecollectorconfig.GarbageCollectorControllerConfiguration
}
// AddFlags adds flags related to GarbageCollectorController for controller manager to the specified FlagSet.
// A nil receiver is a no-op so callers can register option groups unconditionally.
func (o *GarbageCollectorControllerOptions) AddFlags(fs *pflag.FlagSet) {
	if o == nil {
		return
	}
	fs.Int32Var(&o.ConcurrentGCSyncs, "concurrent-gc-syncs", o.ConcurrentGCSyncs, "The number of garbage collector workers that are allowed to sync concurrently.")
	fs.BoolVar(&o.EnableGarbageCollector, "enable-garbage-collector", o.EnableGarbageCollector, "Enables the generic garbage collector. MUST be synced with the corresponding flag of the kube-apiserver.")
}
// ApplyTo fills up GarbageCollectorController config with options.
// A nil receiver leaves cfg untouched and reports no error.
func (o *GarbageCollectorControllerOptions) ApplyTo(cfg *garbagecollectorconfig.GarbageCollectorControllerConfiguration) error {
	if o == nil {
		return nil
	}
	cfg.ConcurrentGCSyncs = o.ConcurrentGCSyncs
	cfg.GCIgnoredResources = o.GCIgnoredResources
	cfg.EnableGarbageCollector = o.EnableGarbageCollector
	return nil
}
// Validate checks validation of GarbageCollectorController.
// A nil receiver validates trivially.
func (o *GarbageCollectorControllerOptions) Validate() []error {
	if o == nil {
		return nil
	}
	// No field-level validation is currently required.
	return []error{}
}
|
go
|
github
|
https://github.com/kubernetes/kubernetes
|
cmd/kube-controller-manager/app/options/garbagecollectorcontroller.go
|
#!/usr/bin/python
#
# cim.py - Copyright (C) 2010 Red Hat, Inc.
# Written by Mike Burns <mburns@redhat.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; version 2 of the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA. A copy of the GNU General Public License is
# also available at http://www.gnu.org/copyleft/gpl.html.
from ovirtnode.ovirtfunctions import *
from ovirtnode.password import *
from snack import *
import _snack
import grp
import pwd
def enable_cim():
    """Enable the CIM broker in the ovirt defaults file and restart it.

    Returns True when the service restart reports success, False otherwise.
    (The original fell off the end and returned None on failure; an explicit
    False is still falsy, so callers are unaffected.)
    """
    augtool("set", "/files/etc/default/ovirt/OVIRT_CIM_ENABLED", "1")
    return bool(system("service ovirt-cim restart"))
def disable_cim():
    """Disable the CIM broker in the ovirt defaults file and restart it.

    Returns True when the service restart reports success, False otherwise.
    (The original fell off the end and returned None on failure; an explicit
    False is still falsy, so callers are unaffected.)
    """
    augtool("set", "/files/etc/default/ovirt/OVIRT_CIM_ENABLED", "0")
    return bool(system("service ovirt-cim restart &> /dev/null"))
class Plugin(PluginBase):
    """Text-UI plugin for enabling/disabling CIM and setting the password
    of the dedicated 'cim' service account.
    """
    # Updated by password_check_callback as the user types.
    valid_password = False
    valid_password_msg = ""

    def __init__(self, ncs):
        PluginBase.__init__(self, "CIM", ncs)
        # Account settings for the dedicated CIM service user.
        self.username = "cim"
        self.shell = "/sbin/nologin"
        self.main_group = "cim"
        self.group_list = ["sfcb"]

    def form(self):
        """Build and return the snack form widgets for this plugin page."""
        elements = Grid(2, 9)
        # Fixed typo in the heading ("Configuation" -> "Configuration").
        heading = Label("CIM Configuration")
        if is_console():
            heading.setColors(customColorset(1))
        elements.setField(heading, 0, 0, anchorLeft=1)
        pw_elements = Grid(3, 3)
        if is_cim_enabled():
            self.current_cim_status = 1
        else:
            self.current_cim_status = 0
        self.cim_status = Checkbox("Enable CIM", isOn=self.current_cim_status)
        elements.setField(self.cim_status, 0, 1, anchorLeft=1)
        aug.load()
        local_heading = Label("CIM Access")
        if is_console():
            local_heading.setColors(customColorset(1))
        elements.setField(local_heading, 0, 3, anchorLeft=1,
                          padding=(0, 2, 0, 0))
        elements.setField(Label(" "), 0, 6)
        pw_elements.setField(Label("Password: "), 0, 1, anchorLeft=1)
        pw_elements.setField(Label("Confirm Password: "), 0, 2, anchorLeft=1)
        # Live-validate both entries as the user types.
        self.cim_password_1 = Entry(15, password=1)
        self.cim_password_1.setCallback(self.password_check_callback)
        self.cim_password_2 = Entry(15, password=1)
        self.cim_password_2.setCallback(self.password_check_callback)
        pw_elements.setField(self.cim_password_1, 1, 1)
        pw_elements.setField(self.cim_password_2, 1, 2)
        self.pw_msg = Textbox(60, 6, "", wrap=1)
        elements.setField(pw_elements, 0, 7, anchorLeft=1)
        elements.setField(self.pw_msg, 0, 8, padding=(0, 1, 0, 0))
        return [Label(""), elements]

    def action(self):
        """Apply the form: set the password (if given) and toggle CIM."""
        self.ncs.screen.setColor("BUTTON", "black", "red")
        self.ncs.screen.setColor("ACTBUTTON", "blue", "white")
        is_transition_to_disabled = (self.cim_status.value() == 0 and
                                     self.current_cim_status == 1)
        is_transition_to_enabled = (self.cim_status.value() == 1 and
                                    self.current_cim_status == 0)
        is_enabled = self.cim_status.value() == 1
        setting_password_failed = False
        if (len(self.cim_password_1.value()) > 0 or
                len(self.cim_password_2.value()) > 0):
            if is_enabled:
                setting_password_failed = self.__set_cim_password()
                if setting_password_failed:
                    ButtonChoiceWindow(self.ncs.screen, "CIM Configuration",
                        "Unable to Set CIM Password", buttons=['Ok'])
                    self.ncs.reset_screen_colors()
                    return False
            else:
                ButtonChoiceWindow(self.ncs.screen, "CIM Configuration",
                    "CIM Must Be Enabled to Set Password", buttons=['Ok'])
                self.ncs.reset_screen_colors()
                return False
        if is_transition_to_disabled:
            if disable_cim():
                ButtonChoiceWindow(self.ncs.screen, "CIM Configuration",
                    "CIM Successfully Disabled", buttons=['Ok'])
                self.ncs.reset_screen_colors()
                return True
        elif is_transition_to_enabled or is_enabled:
            if len(self.cim_password_1.value()) > 0:
                if enable_cim():
                    ButtonChoiceWindow(self.ncs.screen, "CIM Configuration",
                        "CIM Successfully Enabled", buttons=['Ok'])
                    self.ncs.reset_screen_colors()
                else:
                    ButtonChoiceWindow(self.ncs.screen, "CIM Configuration",
                        "CIM Configuration Failed", buttons=['Ok'])
                    self.ncs.reset_screen_colors()
            else:
                ButtonChoiceWindow(self.ncs.screen, "CIM Configuration",
                    "Please Enter a Password", buttons=['Ok'])
                self.ncs.reset_screen_colors()

    def __set_cim_password(self):
        """Create the cim user if needed and set its password.

        Returns True on FAILURE (caller treats truthy as 'failed').
        """
        msg = None
        failed = True
        self.create_cim_user()
        if self.valid_password:
            if set_password(self.cim_password_1.value(), "cim"):
                msg = "CIM Password Successfully Set"
                failed = False
            else:
                msg = "CIM Password Failed"
        else:
            self.ncs._create_warn_screen()
            msg = "CIM Password Is Invalid: %s" % self.valid_password_msg
        ButtonChoiceWindow(self.ncs.screen, "CIM Access", msg,
                           buttons=['Ok'])
        self.ncs.reset_screen_colors()
        return failed

    def password_check_callback(self):
        """Entry callback: validate the two password fields and show status."""
        resp, msg = password_check(self.cim_password_1.value(),
                                   self.cim_password_2.value())
        self.pw_msg.setText(msg)
        self.valid_password = resp == 0
        self.valid_password_msg = msg
        return

    def create_cim_user(self):
        """Create or repair the dedicated cim account (group, shell,
        supplementary groups)."""
        if not check_user_exists(self.username):
            add_user(self.username, self.shell, self.main_group, self.group_list)
        else:
            userinfo = pwd.getpwnam(self.username)
            if not userinfo.pw_gid == grp.getgrnam(self.main_group).gr_gid:
                system_closefds("usermod -g %s %s" % (self.main_group,
                                                      self.username))
            if not userinfo.pw_shell == self.shell:
                system_closefds("usermod -s %s %s" % (self.shell,
                                                      self.username))
            for group in self.group_list:
                if self.username not in grp.getgrnam(group).gr_mem:
                    # BUGFIX: original called self.group_list.join(",", ...),
                    # which raises TypeError (lists have no .join and the %
                    # format expected two values). Join the group names with
                    # str.join and pass the username separately.
                    system_closefds("usermod -G %s %s" % (",".join(self.group_list),
                                                          self.username))
                    break

    def get_plugin(ncs):
        pass
def get_plugin(ncs):
    """Factory entry point used by the setup framework to obtain this plugin."""
    return Plugin(ncs)
|
unknown
|
codeparrot/codeparrot-clean
| ||
/*
* Copyright 2002-present the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.context.index.sample.jpa;
import jakarta.persistence.Embeddable;
/**
* Test candidate for {@link Embeddable}.
*
* @author Stephane Nicoll
*/
@Embeddable
public class SampleEmbeddable {
}
|
java
|
github
|
https://github.com/spring-projects/spring-framework
|
spring-context-indexer/src/test/java/org/springframework/context/index/sample/jpa/SampleEmbeddable.java
|
# TensorFlow C API
- See [www.tensorflow.org/install/lang_c](https://www.tensorflow.org/install/lang_c)
- Nightly builds:
- [Linux CPU-only](https://storage.googleapis.com/tensorflow-nightly/github/tensorflow/lib_package/libtensorflow-cpu-linux-x86_64.tar.gz)
- [Linux GPU](https://storage.googleapis.com/tensorflow-nightly/github/tensorflow/lib_package/libtensorflow-gpu-linux-x86_64.tar.gz)
- [MacOS CPU-only](https://storage.googleapis.com/tensorflow-nightly/github/tensorflow/lib_package/libtensorflow-cpu-darwin-x86_64.tar.gz)
|
unknown
|
github
|
https://github.com/tensorflow/tensorflow
|
tensorflow/c/README.md
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for BatchNorm Bijector."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib import distributions
from tensorflow.contrib.distributions.python.ops import test_util
from tensorflow.contrib.distributions.python.ops.bijectors.batch_normalization import BatchNormalization
from tensorflow.contrib.distributions.python.ops.bijectors.invert import Invert
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import ops
from tensorflow.python.layers import normalization
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import variables
from tensorflow.python.ops.distributions import normal as normal_lib
from tensorflow.python.ops.distributions import transformed_distribution as transformed_distribution_lib
from tensorflow.python.platform import test
from tensorflow.python.training import adam
class BatchNormTest(test_util.VectorDistributionTestHelpers,
                    test.TestCase):
  """Unit tests for the BatchNormalization bijector (and its inverse)."""

  def _reduction_axes(self, input_shape, event_dims):
    """Return the axes batch statistics are reduced over.

    These are all axes of ``input_shape`` that are NOT in ``event_dims``
    (negative event indices are first normalized to non-negative ones).
    """
    if isinstance(event_dims, int):
      event_dims = [event_dims]
    ndims = len(input_shape)
    # Convert event_dims to non-negative indexing.
    event_dims = list(event_dims)
    for idx, x in enumerate(event_dims):
      if x < 0:
        event_dims[idx] = ndims + x
    return tuple(i for i in range(ndims) if i not in event_dims)

  def testForwardInverse(self):
    """Tests forward and backward passes with different event shapes.

    input_shape: Tuple of shapes for input tensor.
    event_dims: Tuple of dimension indices that will be normalized.
    training: Boolean of whether bijector runs in training or inference mode.
    """
    params = [
        ((5*2, 4), [-1], False),
        ((5, 2, 4), [-1], False),
        ((5, 2, 4), [1, 2], False),
        ((5, 2, 4), [0, 1], False),
        ((5*2, 4), [-1], True),
        ((5, 2, 4), [-1], True),
        ((5, 2, 4), [1, 2], True),
        ((5, 2, 4), [0, 1], True)
    ]
    for input_shape, event_dims, training in params:
      x_ = np.arange(5 * 4 * 2).astype(np.float32).reshape(input_shape)
      with self.cached_session() as sess:
        x = constant_op.constant(x_)
        # When training, memorize the exact mean of the last
        # minibatch that it normalized (instead of moving average assignment).
        layer = normalization.BatchNormalization(
            axis=event_dims, momentum=0., epsilon=0.)
        batch_norm = BatchNormalization(
            batchnorm_layer=layer, training=training)
        # Minibatch statistics are saved only after norm_x has been computed.
        norm_x = batch_norm.inverse(x)
        with ops.control_dependencies(batch_norm.batchnorm.updates):
          moving_mean = array_ops.identity(batch_norm.batchnorm.moving_mean)
          moving_var = array_ops.identity(batch_norm.batchnorm.moving_variance)
          denorm_x = batch_norm.forward(array_ops.identity(norm_x))
          fldj = batch_norm.forward_log_det_jacobian(
              x, event_ndims=len(event_dims))
          # Use identity to invalidate cache.
          ildj = batch_norm.inverse_log_det_jacobian(
              array_ops.identity(denorm_x), event_ndims=len(event_dims))
        variables.global_variables_initializer().run()
        # Update variables.
        norm_x_ = sess.run(norm_x)
        [
            norm_x_,
            moving_mean_,
            moving_var_,
            denorm_x_,
            ildj_,
            fldj_,
        ] = sess.run([
            norm_x,
            moving_mean,
            moving_var,
            denorm_x,
            ildj,
            fldj,
        ])
        self.assertEqual("batch_normalization", batch_norm.name)

        reduction_axes = self._reduction_axes(input_shape, event_dims)
        keepdims = len(event_dims) > 1

        expected_batch_mean = np.mean(
            x_, axis=reduction_axes, keepdims=keepdims)
        expected_batch_var = np.var(x_, axis=reduction_axes, keepdims=keepdims)

        if training:
          # When training=True, values become normalized across batch dim and
          # original values are recovered after de-normalizing.
          zeros = np.zeros_like(norm_x_)
          self.assertAllClose(np.mean(zeros, axis=reduction_axes),
                              np.mean(norm_x_, axis=reduction_axes))
          self.assertAllClose(expected_batch_mean, moving_mean_)
          self.assertAllClose(expected_batch_var, moving_var_)
          self.assertAllClose(x_, denorm_x_, atol=1e-5)
          # Since moving statistics are set to batch statistics after
          # normalization, ildj and -fldj should match.
          self.assertAllClose(ildj_, -fldj_)
          # ildj is computed with minibatch statistics.
          expected_ildj = np.sum(np.log(1.) - .5 * np.log(
              expected_batch_var + batch_norm.batchnorm.epsilon))
          self.assertAllClose(expected_ildj, ildj_)
        else:
          # When training=False, moving_mean, moving_var remain at their
          # initialized values (0., 1.), resulting in no scale/shift (a small
          # shift occurs if epsilon > 0.)
          self.assertAllClose(x_, norm_x_)
          self.assertAllClose(x_, denorm_x_, atol=1e-5)
          # ildj is computed with saved statistics.
          expected_ildj = np.sum(
              np.log(1.) - .5 * np.log(1. + batch_norm.batchnorm.epsilon))
          self.assertAllClose(expected_ildj, ildj_)

  def testMaximumLikelihoodTraining(self):
    # Test Maximum Likelihood training with default bijector.
    with self.cached_session() as sess:
      base_dist = distributions.MultivariateNormalDiag(loc=[0., 0.])
      batch_norm = BatchNormalization(training=True)
      dist = transformed_distribution_lib.TransformedDistribution(
          distribution=base_dist,
          bijector=batch_norm)
      target_dist = distributions.MultivariateNormalDiag(loc=[1., 2.])
      target_samples = target_dist.sample(100)
      dist_samples = dist.sample(3000)
      loss = -math_ops.reduce_mean(dist.log_prob(target_samples))
      with ops.control_dependencies(batch_norm.batchnorm.updates):
        train_op = adam.AdamOptimizer(1e-2).minimize(loss)
        moving_mean = array_ops.identity(batch_norm.batchnorm.moving_mean)
        moving_var = array_ops.identity(batch_norm.batchnorm.moving_variance)
      variables.global_variables_initializer().run()
      for _ in range(3000):
        sess.run(train_op)
      [
          dist_samples_,
          moving_mean_,
          moving_var_
      ] = sess.run([
          dist_samples,
          moving_mean,
          moving_var
      ])
      # After training, the transformed samples and the learned moving
      # statistics should approach the target distribution's parameters.
      self.assertAllClose([1., 2.], np.mean(dist_samples_, axis=0), atol=5e-2)
      self.assertAllClose([1., 2.], moving_mean_, atol=5e-2)
      self.assertAllClose([1., 1.], moving_var_, atol=5e-2)

  def testLogProb(self):
    with self.cached_session() as sess:
      layer = normalization.BatchNormalization(epsilon=0.)
      batch_norm = BatchNormalization(batchnorm_layer=layer, training=False)
      base_dist = distributions.MultivariateNormalDiag(loc=[0., 0.])
      dist = transformed_distribution_lib.TransformedDistribution(
          distribution=base_dist,
          bijector=batch_norm,
          validate_args=True)
      samples = dist.sample(int(1e5))
      # No volume distortion since training=False, bijector is initialized
      # to the identity transformation.
      base_log_prob = base_dist.log_prob(samples)
      dist_log_prob = dist.log_prob(samples)
      variables.global_variables_initializer().run()
      base_log_prob_, dist_log_prob_ = sess.run([base_log_prob, dist_log_prob])
      self.assertAllClose(base_log_prob_, dist_log_prob_)

  def testMutuallyConsistent(self):
    # BatchNorm bijector is only mutually consistent when training=False.
    dims = 4
    with self.cached_session() as sess:
      layer = normalization.BatchNormalization(epsilon=0.)
      batch_norm = BatchNormalization(batchnorm_layer=layer, training=False)
      dist = transformed_distribution_lib.TransformedDistribution(
          distribution=normal_lib.Normal(loc=0., scale=1.),
          bijector=batch_norm,
          event_shape=[dims],
          validate_args=True)
      self.run_test_sample_consistent_log_prob(
          sess_run_fn=sess.run,
          dist=dist,
          num_samples=int(1e5),
          radius=2.,
          center=0.,
          rtol=0.02)

  def testInvertMutuallyConsistent(self):
    # BatchNorm bijector is only mutually consistent when training=False.
    dims = 4
    with self.cached_session() as sess:
      layer = normalization.BatchNormalization(epsilon=0.)
      batch_norm = Invert(
          BatchNormalization(batchnorm_layer=layer, training=False))
      dist = transformed_distribution_lib.TransformedDistribution(
          distribution=normal_lib.Normal(loc=0., scale=1.),
          bijector=batch_norm,
          event_shape=[dims],
          validate_args=True)
      self.run_test_sample_consistent_log_prob(
          sess_run_fn=sess.run,
          dist=dist,
          num_samples=int(1e5),
          radius=2.,
          center=0.,
          rtol=0.02)
# Standard TensorFlow test entry point.
if __name__ == "__main__":
  test.main()
|
unknown
|
codeparrot/codeparrot-clean
| ||
# coding:utf8
from __future__ import print_function
class Property(object):
    """Emulate PyProperty_Type() in Objects/descrobject.c.

    A pure-Python re-implementation of the built-in ``property``
    descriptor, following the Python Descriptor HowTo Guide.
    See also:
    https://stackoverflow.com/questions/17330160/how-does-the-property-decorator-work

    NOTE: the original docstring was delimited with a single ``"`` on each
    side across multiple lines, which is a SyntaxError in Python; fixed to
    a triple-quoted string.
    """

    def __init__(self, fget=None, fset=None, fdel=None, doc=None):
        self.fget = fget
        self.fset = fset
        self.fdel = fdel
        # Inherit the getter's docstring when no explicit doc is supplied,
        # matching the behavior of the built-in property.
        if doc is None and fget is not None:
            doc = fget.__doc__
        self.__doc__ = doc

    def __get__(self, obj, objtype=None):
        # Accessed on the class itself: return the descriptor object.
        if obj is None:
            return self
        if self.fget is None:
            raise AttributeError("unreadable attribute")
        return self.fget(obj)

    def __set__(self, obj, value):
        if self.fset is None:
            raise AttributeError("can't set attribute")
        self.fset(obj, value)

    def __delete__(self, obj):
        if self.fdel is None:
            raise AttributeError("can't delete attribute")
        self.fdel(obj)

    def getter(self, fget):
        """Return a copy of this Property with the getter replaced.

        Intended for decorator use; returns a new Property instance.
        """
        return type(self)(fget, self.fset, self.fdel, self.__doc__)

    def setter(self, fset):
        """Return a copy of this Property with the setter replaced."""
        return type(self)(self.fget, fset, self.fdel, self.__doc__)

    def deleter(self, fdel):
        """Return a copy of this Property with the deleter replaced."""
        return type(self)(self.fget, self.fset, fdel, self.__doc__)
class TestProperty(object):
    """Demo class exercising the built-in ``property`` descriptor."""
    def __init__(self):
        self._name = 'kuang'
    @property
    def name(self):
        # Getter: runs via the descriptor protocol on every attribute read.
        print('我是descriptor,装饰器property.')
        return self._name
    @name.setter
    def name(self, value):
        """Setter registered through the decorator sugar.

        ``name.setter`` returns a property object; the decorator syntax is
        equivalent to calling ``name.setter(name)``, after which
        ``name.setter(name).fget == name``.
        """
        self._name = value
    def me(self, name):
        # Plain method, unaffected by the descriptors above.
        print('Name:', name)
# Module-level demo: the property descriptors live on the class dict,
# not on the instance dict.
testObj = TestProperty()
print('实例拥有的属性值:', testObj.__dict__)
print('类拥有的属性值:', TestProperty.__dict__)
|
unknown
|
codeparrot/codeparrot-clean
| ||
"""Bing Search API toolkit."""
from typing import TYPE_CHECKING, Any
from langchain_classic._api import create_importer
if TYPE_CHECKING:
from langchain_community.tools import BingSearchResults, BingSearchRun
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
    "BingSearchRun": "langchain_community.tools",
    "BingSearchResults": "langchain_community.tools",
}

# Importer that warns about the deprecated location and forwards the
# attribute lookup to the module named in DEPRECATED_LOOKUP.
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
    """Resolve module attributes dynamically through the deprecation importer."""
    attribute = _import_attribute(name)
    return attribute
# Public names re-exported (lazily, via __getattr__) from langchain_community.
__all__ = [
    "BingSearchResults",
    "BingSearchRun",
]
|
python
|
github
|
https://github.com/langchain-ai/langchain
|
libs/langchain/langchain_classic/tools/bing_search/__init__.py
|
#!/usr/bin/python2.6
# -*- coding: utf-8 -*-
# This file is a part of Metagam project.
#
# Metagam is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# any later version.
#
# Metagam is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Metagam. If not, see <http://www.gnu.org/licenses/>.
import unittest
from concurrence import dispatch, Tasklet
import mg.test.testorm
from mg.core.memcached import Memcached
from mg.core.cass import CassandraPool
class TestORM_Storage2(mg.test.testorm.TestORM):
    """Re-run the base ORM test suite with the database in storage mode 2."""
    def setUp(self):
        mg.test.testorm.TestORM.setUp(self)
        # Switch the inherited test database to storage format 2 under
        # application id "testapp" before each test.
        self.db.storage = 2
        self.db.app = "testapp"
def main():
    """Wipe any leftover test data, then hand control to unittest."""
    mg.test.testorm.cleanup()
    unittest.main()

if __name__ == "__main__":
    # Run under the concurrence scheduler so tasklets work during the tests.
    dispatch(main)
|
unknown
|
codeparrot/codeparrot-clean
| ||
import os
from disposableredis import DisposableRedis
# Names of the environment variables used to locate the RedisGraph module,
# the redis-server binary, and an optional fixed port.
REDIS_MODULE_PATH_ENVVAR = 'REDIS_MODULE_PATH'
REDIS_PATH_ENVVAR = 'REDIS_PATH'
REDIS_PORT_ENVVAR = 'REDIS_PORT'
def execute_query(redis_graph, query_desc, query):
    """Print a query's description, text and execution plan, then run it.

    The result set is pretty-printed to stdout; nothing is returned.
    """
    print(query_desc)
    print("query: {query}".format(query=query))
    print("execution plan:\n{plan}".format(plan=redis_graph.execution_plan(query)))
    query_res = redis_graph.query(query)
    query_res.pretty_print()
    print("\n")
def _redis():
    """Build a DisposableRedis instance configured from the environment.

    Prints usage guidance and returns None when a required environment
    variable is missing.
    """
    module_path = os.getenv(REDIS_MODULE_PATH_ENVVAR)
    redis_path = os.getenv(REDIS_PATH_ENVVAR)
    fixed_port = os.getenv(REDIS_PORT_ENVVAR)
    if module_path is None:
        print("Undeclared environment variable {}".format(REDIS_MODULE_PATH_ENVVAR))
        print("run: export {}=../../src/redisgraph.so".format(REDIS_MODULE_PATH_ENVVAR))
        return None
    if redis_path is None:
        print("Undeclared environment variable {}".format(REDIS_PATH_ENVVAR))
        print("run: export {}=<path_to_redis-server>".format(REDIS_PATH_ENVVAR))
        return None
    _module_path = os.path.abspath(os.path.join(os.getcwd(), module_path))
    # os.getenv already yields None when the port variable is unset.
    port = fixed_port if fixed_port is not None else None
    print("port=%s, path=%s, loadmodule=%s" % (port, redis_path, _module_path))
    return DisposableRedis(port=port, path=redis_path, loadmodule=_module_path)
|
unknown
|
codeparrot/codeparrot-clean
| ||
from __future__ import unicode_literals
from unittest import skipUnless
from django.db import connection
from django.db.utils import DatabaseError
from django.test import TransactionTestCase, mock, skipUnlessDBFeature
from .models import Article, Reporter
class IntrospectionTests(TransactionTestCase):
    """Exercise ``connection.introspection`` against the test app's models."""
    available_apps = ['introspection']
    def test_table_names(self):
        tl = connection.introspection.table_names()
        # table_names() promises a sorted result.
        self.assertEqual(tl, sorted(tl))
        self.assertIn(Reporter._meta.db_table, tl,
                      "'%s' isn't in table_list()." % Reporter._meta.db_table)
        self.assertIn(Article._meta.db_table, tl,
                      "'%s' isn't in table_list()." % Article._meta.db_table)
    def test_django_table_names(self):
        with connection.cursor() as cursor:
            cursor.execute('CREATE TABLE django_ixn_test_table (id INTEGER);')
            tl = connection.introspection.django_table_names()
            cursor.execute("DROP TABLE django_ixn_test_table;")
            self.assertNotIn('django_ixn_test_table', tl,
                             "django_table_names() returned a non-Django table")
    def test_django_table_names_retval_type(self):
        # Table name is a list #15216
        tl = connection.introspection.django_table_names(only_existing=True)
        self.assertIs(type(tl), list)
        tl = connection.introspection.django_table_names(only_existing=False)
        self.assertIs(type(tl), list)
    def test_table_names_with_views(self):
        with connection.cursor() as cursor:
            try:
                cursor.execute(
                    'CREATE VIEW introspection_article_view AS SELECT headline '
                    'from introspection_article;')
            except DatabaseError as e:
                if 'insufficient privileges' in str(e):
                    self.fail("The test user has no CREATE VIEW privileges")
                else:
                    raise
        self.assertIn('introspection_article_view',
                      connection.introspection.table_names(include_views=True))
        self.assertNotIn('introspection_article_view',
                         connection.introspection.table_names())
    def test_installed_models(self):
        tables = [Article._meta.db_table, Reporter._meta.db_table]
        models = connection.introspection.installed_models(tables)
        self.assertEqual(models, {Article, Reporter})
    def test_sequence_list(self):
        sequences = connection.introspection.sequence_list()
        expected = {'table': Reporter._meta.db_table, 'column': 'id'}
        self.assertIn(expected, sequences,
                      'Reporter sequence not found in sequence_list()')
    def test_get_table_description_names(self):
        with connection.cursor() as cursor:
            desc = connection.introspection.get_table_description(cursor, Reporter._meta.db_table)
        self.assertEqual([r[0] for r in desc],
                         [f.column for f in Reporter._meta.fields])
    def test_get_table_description_types(self):
        with connection.cursor() as cursor:
            desc = connection.introspection.get_table_description(cursor, Reporter._meta.db_table)
        # Expected field types vary with what each backend can introspect.
        self.assertEqual(
            [datatype(r[1], r) for r in desc],
            ['AutoField' if connection.features.can_introspect_autofield else 'IntegerField',
             'CharField', 'CharField', 'CharField',
             'BigIntegerField' if connection.features.can_introspect_big_integer_field else 'IntegerField',
             'BinaryField' if connection.features.can_introspect_binary_field else 'TextField',
             'SmallIntegerField' if connection.features.can_introspect_small_integer_field else 'IntegerField']
        )
    # The following test fails on Oracle due to #17202 (can't correctly
    # inspect the length of character columns).
    @skipUnlessDBFeature('can_introspect_max_length')
    def test_get_table_description_col_lengths(self):
        with connection.cursor() as cursor:
            desc = connection.introspection.get_table_description(cursor, Reporter._meta.db_table)
        self.assertEqual(
            [r[3] for r in desc if datatype(r[1], r) == 'CharField'],
            [30, 30, 254]
        )
    @skipUnlessDBFeature('can_introspect_null')
    def test_get_table_description_nullable(self):
        with connection.cursor() as cursor:
            desc = connection.introspection.get_table_description(cursor, Reporter._meta.db_table)
        nullable_by_backend = connection.features.interprets_empty_strings_as_nulls
        self.assertEqual(
            [r[6] for r in desc],
            [False, nullable_by_backend, nullable_by_backend, nullable_by_backend, True, True, False]
        )
    # Regression test for #9991 - 'real' types in postgres
    @skipUnlessDBFeature('has_real_datatype')
    def test_postgresql_real_type(self):
        with connection.cursor() as cursor:
            cursor.execute("CREATE TABLE django_ixn_real_test_table (number REAL);")
            desc = connection.introspection.get_table_description(cursor, 'django_ixn_real_test_table')
            cursor.execute('DROP TABLE django_ixn_real_test_table;')
        self.assertEqual(datatype(desc[0][1], desc[0]), 'FloatField')
    @skipUnlessDBFeature('can_introspect_foreign_keys')
    def test_get_relations(self):
        with connection.cursor() as cursor:
            relations = connection.introspection.get_relations(cursor, Article._meta.db_table)
        # That's {field_name: (field_name_other_table, other_table)}
        expected_relations = {
            'reporter_id': ('id', Reporter._meta.db_table),
            'response_to_id': ('id', Article._meta.db_table),
        }
        self.assertEqual(relations, expected_relations)
        # Removing a field shouldn't disturb get_relations (#17785)
        body = Article._meta.get_field('body')
        with connection.schema_editor() as editor:
            editor.remove_field(Article, body)
        with connection.cursor() as cursor:
            relations = connection.introspection.get_relations(cursor, Article._meta.db_table)
        with connection.schema_editor() as editor:
            editor.add_field(Article, body)
        self.assertEqual(relations, expected_relations)
    @skipUnless(connection.vendor == 'sqlite', "This is an sqlite-specific issue")
    def test_get_relations_alt_format(self):
        """With SQLite, foreign keys can be added with different syntaxes."""
        with connection.cursor() as cursor:
            cursor.fetchone = mock.Mock(return_value=[
                "CREATE TABLE track(id, art_id INTEGER, FOREIGN KEY(art_id) REFERENCES %s(id));" % Article._meta.db_table
            ])
            relations = connection.introspection.get_relations(cursor, 'mocked_table')
        self.assertEqual(relations, {'art_id': ('id', Article._meta.db_table)})
    @skipUnlessDBFeature('can_introspect_foreign_keys')
    def test_get_key_columns(self):
        with connection.cursor() as cursor:
            key_columns = connection.introspection.get_key_columns(cursor, Article._meta.db_table)
        self.assertEqual(
            set(key_columns),
            {('reporter_id', Reporter._meta.db_table, 'id'),
             ('response_to_id', Article._meta.db_table, 'id')})
    def test_get_primary_key_column(self):
        with connection.cursor() as cursor:
            primary_key_column = connection.introspection.get_primary_key_column(cursor, Article._meta.db_table)
        self.assertEqual(primary_key_column, 'id')
    def test_get_indexes(self):
        with connection.cursor() as cursor:
            indexes = connection.introspection.get_indexes(cursor, Article._meta.db_table)
        self.assertEqual(indexes['reporter_id'], {'unique': False, 'primary_key': False})
    def test_get_indexes_multicol(self):
        """
        Test that multicolumn indexes are not included in the introspection
        results.
        """
        with connection.cursor() as cursor:
            indexes = connection.introspection.get_indexes(cursor, Reporter._meta.db_table)
        self.assertNotIn('first_name', indexes)
        self.assertIn('id', indexes)
def datatype(dbtype, description):
    """Helper to convert a data type into a string."""
    dt = connection.introspection.get_field_type(dbtype, description)
    # An exact-tuple check is deliberate: description rows may be tuple
    # subclasses, and only a plain tuple carries (name, params).
    return dt[0] if type(dt) is tuple else dt
|
unknown
|
codeparrot/codeparrot-clean
| ||
#! /usr/bin/env python
# -*- coding: iso-8859-1 -*-
# vi:ts=4:et
import socket
import pycurl
import unittest
try:
import urllib.parse as urllib_parse
except ImportError:
import urllib as urllib_parse
from . import appmanager
from . import util
# Start the test HTTP app on port 8380; the returned module-level hooks are
# picked up by the test runner.
setup_module, teardown_module = appmanager.setup(('app', 8380))
# Flag flipped by socket_open() so the test can verify the callback ran.
socket_open_called = False
def socket_open(family, socktype, protocol):
    """Socket factory for pycurl's OPENSOCKETFUNCTION callback.

    Records (in the module-level flag) that it was invoked, then returns a
    freshly created socket with SO_KEEPALIVE enabled.
    """
    global socket_open_called
    socket_open_called = True
    sock = socket.socket(family, socktype, protocol)
    sock.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1)
    return sock
class SocketOpenTest(unittest.TestCase):
    """Exercise pycurl's OPENSOCKETFUNCTION hook end to end."""
    def setUp(self):
        self.curl = pycurl.Curl()
    def tearDown(self):
        self.curl.close()
    def test_socket_open(self):
        # Install the custom socket factory, fetch a known-good URL, then
        # check both that the callback fired and that the body arrived.
        self.curl.setopt(pycurl.OPENSOCKETFUNCTION, socket_open)
        self.curl.setopt(self.curl.URL, 'http://localhost:8380/success')
        sio = util.StringIO()
        self.curl.setopt(pycurl.WRITEFUNCTION, sio.write)
        self.curl.perform()
        assert socket_open_called
        self.assertEqual('success', sio.getvalue())
|
unknown
|
codeparrot/codeparrot-clean
| ||
# Copyright (c) 2006-2007 The Regents of The University of Michigan
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Steve Reinhardt
import m5
from m5.objects import *
m5.util.addToPath('../configs/common')
class MyCache(BaseCache):
    """Shared cache parameters: 2-way associative, 64B lines, 1ns latency."""
    assoc = 2
    block_size = 64
    latency = '1ns'
    mshrs = 10
    tgts_per_mshr = 5
class MyL1Cache(MyCache):
    """L1 variant: marked top-level and allowing more targets per MSHR."""
    is_top_level = True
    tgts_per_mshr = 20
# Single out-of-order CPU with split 128kB/256kB L1 caches and a 2MB L2.
cpu = DerivO3CPU(cpu_id=0)
cpu.addTwoLevelCacheHierarchy(MyL1Cache(size = '128kB'),
                              MyL1Cache(size = '256kB'),
                              MyCache(size = '2MB'))
cpu.clock = '2GHz'
system = System(cpu = cpu,
                physmem = SimpleMemory(),
                membus = CoherentBus())
system.system_port = system.membus.slave
system.physmem.port = system.membus.master
# create the interrupt controller
cpu.createInterruptController()
cpu.connectAllPorts(system.membus)
# Syscall-emulation mode (full_system=False).
root = Root(full_system = False, system = system)
|
unknown
|
codeparrot/codeparrot-clean
| ||
/*
* Copyright 2012-present the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.boot.logging.logback;
import java.io.Console;
import java.util.function.BiConsumer;
import java.util.function.Function;
import ch.qos.logback.core.util.FileSize;
import org.jspecify.annotations.Nullable;
import org.springframework.boot.logging.LogFile;
import org.springframework.boot.logging.LoggingSystemProperties;
import org.springframework.core.convert.ConversionFailedException;
import org.springframework.core.convert.ConverterNotFoundException;
import org.springframework.core.env.Environment;
import org.springframework.core.env.PropertyResolver;
import org.springframework.util.ClassUtils;
import org.springframework.util.unit.DataSize;
/**
* {@link LoggingSystemProperties} for Logback.
*
* @author Phillip Webb
* @since 2.4.0
* @see RollingPolicySystemProperty
*/
public class LogbackLoggingSystemProperties extends LoggingSystemProperties {

	/**
	 * Whether JBoss Logging is on the class path; when it is, it gets routed to SLF4J
	 * (and hence Logback) via a system property.
	 */
	private static final boolean JBOSS_LOGGING_PRESENT = ClassUtils.isPresent("org.jboss.logging.Logger",
			LogbackLoggingSystemProperties.class.getClassLoader());

	/**
	 * Create a new {@link LogbackLoggingSystemProperties} instance.
	 * @param environment the source environment
	 */
	public LogbackLoggingSystemProperties(Environment environment) {
		super(environment);
	}

	/**
	 * Create a new {@link LogbackLoggingSystemProperties} instance.
	 * @param environment the source environment
	 * @param setter setter used to apply the property
	 * @since 2.4.3
	 */
	public LogbackLoggingSystemProperties(Environment environment,
			@Nullable BiConsumer<String, @Nullable String> setter) {
		super(environment, setter);
	}

	/**
	 * Create a new {@link LoggingSystemProperties} instance.
	 * @param environment the source environment
	 * @param defaultValueResolver function used to resolve default values or {@code null}
	 * @param setter setter used to apply the property or {@code null} for system
	 * properties
	 * @since 3.2.0
	 */
	public LogbackLoggingSystemProperties(Environment environment,
			@Nullable Function<@Nullable String, @Nullable String> defaultValueResolver,
			@Nullable BiConsumer<String, @Nullable String> setter) {
		super(environment, defaultValueResolver, setter);
	}

	@Override
	protected @Nullable Console getConsole() {
		return super.getConsole();
	}

	/**
	 * Apply the standard properties, then the Logback-specific ones (JBoss Logging
	 * routing and rolling-policy settings).
	 */
	@Override
	protected void apply(@Nullable LogFile logFile, PropertyResolver resolver) {
		super.apply(logFile, resolver);
		applyJBossLoggingProperties();
		applyRollingPolicyProperties(resolver);
	}

	/**
	 * Route JBoss Logging to SLF4J (and hence Logback) when it is present.
	 */
	private void applyJBossLoggingProperties() {
		if (JBOSS_LOGGING_PRESENT) {
			setSystemProperty("org.jboss.logging.provider", "slf4j");
		}
	}

	/**
	 * Export each rolling-policy application property as its corresponding system
	 * property.
	 */
	private void applyRollingPolicyProperties(PropertyResolver resolver) {
		applyRollingPolicy(RollingPolicySystemProperty.FILE_NAME_PATTERN, resolver);
		applyRollingPolicy(RollingPolicySystemProperty.CLEAN_HISTORY_ON_START, resolver);
		applyRollingPolicy(RollingPolicySystemProperty.MAX_FILE_SIZE, resolver, DataSize.class);
		applyRollingPolicy(RollingPolicySystemProperty.TOTAL_SIZE_CAP, resolver, DataSize.class);
		applyRollingPolicy(RollingPolicySystemProperty.MAX_HISTORY, resolver);
	}

	private void applyRollingPolicy(RollingPolicySystemProperty property, PropertyResolver resolver) {
		applyRollingPolicy(property, resolver, String.class);
	}

	/**
	 * Apply a single rolling-policy property, converting {@link DataSize} values to a
	 * plain byte count before exporting them.
	 */
	private <T> void applyRollingPolicy(RollingPolicySystemProperty property, PropertyResolver resolver,
			Class<T> type) {
		T value = getProperty(resolver, property.getApplicationPropertyName(), type);
		if (value != null) {
			String stringValue = String.valueOf((value instanceof DataSize dataSize) ? dataSize.toBytes() : value);
			setSystemProperty(property.getEnvironmentVariableName(), stringValue);
		}
	}

	/**
	 * Get a property, falling back to Logback's {@link FileSize} syntax when standard
	 * {@link DataSize} conversion fails.
	 */
	@SuppressWarnings("unchecked")
	private <T> @Nullable T getProperty(PropertyResolver resolver, String key, Class<T> type) {
		try {
			return resolver.getProperty(key, type);
		}
		catch (ConversionFailedException | ConverterNotFoundException ex) {
			if (type != DataSize.class) {
				throw ex;
			}
			// Fall back to Logback's own size syntax (for example "100MB").
			String value = resolver.getProperty(key);
			return (T) DataSize.ofBytes(FileSize.valueOf(value).getSize());
		}
	}

}
|
java
|
github
|
https://github.com/spring-projects/spring-boot
|
core/spring-boot/src/main/java/org/springframework/boot/logging/logback/LogbackLoggingSystemProperties.java
|
# Copyright 2013 Free Software Foundation, Inc.
#
# This is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see
# <http://www.gnu.org/licenses/>.
import gcc
import gccutils
import sys
# Module-level switches controlling the checker's diagnostic output.
want_raii_info = False
logging = False
show_cfg = False
def log(msg, indent=0):
    """Write MSG to stderr, indented, when the module's logging flag is set."""
    # Note: the module-level 'logging' flag is only read here, so no
    # 'global' declaration is needed.
    if logging:
        sys.stderr.write('%s%s\n' % (' ' * indent, msg))
        sys.stderr.flush()
def is_cleanup_type(return_type):
    """Return True if RETURN_TYPE is a pointer to 'struct cleanup'."""
    if not isinstance(return_type, gcc.PointerType):
        return False
    pointee = return_type.dereference
    if not isinstance(pointee, gcc.RecordType):
        return False
    return str(pointee.name) == 'cleanup'
def is_constructor(decl):
    """Return True if the function DECL is a cleanup constructor; False otherwise"""
    if not is_cleanup_type(decl.type.type):
        return False
    name = decl.name
    return not name or str(name) != 'make_final_cleanup'
# The function names recognized as cleanup destructors.
destructor_names = {'do_cleanups', 'discard_cleanups'}

def is_destructor(decl):
    """Return True if the function DECL is a cleanup destructor."""
    return decl.name in destructor_names
# This list is just much too long... we should probably have an
# attribute instead.
special_names = {
    'do_final_cleanups', 'discard_final_cleanups',
    'save_cleanups', 'save_final_cleanups',
    'restore_cleanups', 'restore_final_cleanups',
    'exceptions_state_mc_init',
    'make_my_cleanup2', 'make_final_cleanup', 'all_cleanups',
    'save_my_cleanups', 'quit_target',
}

def needs_special_treatment(decl):
    """Return True if DECL is exempt from the normal cleanup checking."""
    return decl.name in special_names
# Sometimes we need a new placeholder object that isn't the same as
# anything else.
class Dummy(object):
    """Placeholder for an anonymous cleanup; records only its source location."""
    def __init__(self, location):
        self.location = location
# A wrapper for a cleanup which has been assigned to a variable.
# This holds the variable and the location.
class Cleanup(object):
    """A cleanup bound to a variable; records the variable and its location."""
    def __init__(self, var, location):
        self.var = var
        self.location = location
# A class representing a master cleanup. This holds a stack of
# cleanup objects and supports a merging operation.
class MasterCleanup(object):
# Create a new MasterCleanup object. OTHER, if given, is a
# MasterCleanup object to copy.
def __init__(self, other = None):
# 'cleanups' is a list of cleanups. Each element is either a
# Dummy, for an anonymous cleanup, or a Cleanup, for a cleanup
# which was assigned to a variable.
if other is None:
self.cleanups = []
self.aliases = {}
else:
self.cleanups = other.cleanups[:]
self.aliases = dict(other.aliases)
def compare_vars(self, definition, argument):
if definition == argument:
return True
if argument in self.aliases:
argument = self.aliases[argument]
if definition in self.aliases:
definition = self.aliases[definition]
return definition == argument
def note_assignment(self, lhs, rhs):
log('noting assignment %s = %s' % (lhs, rhs), 4)
self.aliases[lhs] = rhs
# Merge with another MasterCleanup.
# Returns True if this resulted in a change to our state.
def merge(self, other):
# We do explicit iteration like this so we can easily
# update the list after the loop.
counter = -1
found_named = False
for counter in range(len(self.cleanups) - 1, -1, -1):
var = self.cleanups[counter]
log('merge checking %s' % var, 4)
# Only interested in named cleanups.
if isinstance(var, Dummy):
log('=> merge dummy', 5)
continue
# Now see if VAR is found in OTHER.
if other._find_var(var.var) >= 0:
log ('=> merge found', 5)
break
log('=>merge not found', 5)
found_named = True
if found_named and counter < len(self.cleanups) - 1:
log ('merging to %d' % counter, 4)
if counter < 0:
self.cleanups = []
else:
self.cleanups = self.cleanups[0:counter]
return True
# If SELF is empty but OTHER has some cleanups, then consider
# that a change as well.
if len(self.cleanups) == 0 and len(other.cleanups) > 0:
log('merging non-empty other', 4)
self.cleanups = other.cleanups[:]
return True
return False
# Push a new constructor onto our stack. LHS is the
# left-hand-side of the GimpleCall statement. It may be None,
# meaning that this constructor's value wasn't used.
def push(self, location, lhs):
if lhs is None:
obj = Dummy(location)
else:
obj = Cleanup(lhs, location)
log('pushing %s' % lhs, 4)
idx = self._find_var(lhs)
if idx >= 0:
gcc.permerror(location, 'reassigning to known cleanup')
gcc.inform(self.cleanups[idx].location,
'previous assignment is here')
self.cleanups.append(obj)
# A helper for merge and pop that finds BACK_TO in self.cleanups,
# and returns the index, or -1 if not found.
def _find_var(self, back_to):
for i in range(len(self.cleanups) - 1, -1, -1):
if isinstance(self.cleanups[i], Dummy):
continue
if self.compare_vars(self.cleanups[i].var, back_to):
return i
return -1
# Pop constructors until we find one matching BACK_TO.
# This is invoked when we see a do_cleanups call.
def pop(self, location, back_to):
log('pop:', 4)
i = self._find_var(back_to)
if i >= 0:
self.cleanups = self.cleanups[0:i]
else:
gcc.permerror(location, 'destructor call with unknown argument')
# Check whether ARG is the current master cleanup. Return True if
# all is well.
def verify(self, location, arg):
log('verify %s' % arg, 4)
return (len(self.cleanups) > 0
and not isinstance(self.cleanups[0], Dummy)
and self.compare_vars(self.cleanups[0].var, arg))
# Check whether SELF is empty.
def isempty(self):
log('isempty: len = %d' % len(self.cleanups), 4)
return len(self.cleanups) == 0
    # Emit informational warnings about the cleanup stack.
    def inform(self):
        """Report every outstanding cleanup (newest first) as leaked."""
        for item in reversed(self.cleanups):
            gcc.inform(item.location, 'leaked cleanup')
class CleanupChecker:
    """Per-function checker for gdb-style cleanup discipline.

    Walks the control-flow graph, simulating the cleanup stack
    (a MasterCleanup) along every path, and diagnoses constructor /
    destructor mismatches, leaked cleanups, and constructors that do
    not return the master cleanup.
    """
    def __init__(self, fun):
        # The gcc function being checked.
        self.fun = fun
        # CFG edges already propagated through; cuts off cycles.
        self.seen_edges = set()
        # Locations already diagnosed for "stack not empty at return",
        # so each bad return is reported only once.
        self.bad_returns = set()
        # This maps BB indices to a list of master cleanups for the
        # BB.
        self.master_cleanups = {}
    # Pick a reasonable location for the basic block BB.
    def guess_bb_location(self, bb):
        """Return the first statement location in BB, or the function end."""
        if isinstance(bb.gimple, list):
            for stmt in bb.gimple:
                if stmt.loc:
                    return stmt.loc
        return self.fun.end
    # Compute the master cleanup list for BB.
    # Modifies MASTER_CLEANUP in place.
    def compute_master(self, bb, bb_from, master_cleanup):
        """Simulate BB's statements against MASTER_CLEANUP (entered from BB_FROM)."""
        if not isinstance(bb.gimple, list):
            return
        curloc = self.fun.end
        for stmt in bb.gimple:
            if stmt.loc:
                curloc = stmt.loc
            if isinstance(stmt, gcc.GimpleCall) and stmt.fndecl:
                if is_constructor(stmt.fndecl):
                    log('saw constructor %s in bb=%d' % (str(stmt.fndecl), bb.index), 2)
                    self.cleanup_aware = True
                    master_cleanup.push(curloc, stmt.lhs)
                elif is_destructor(stmt.fndecl):
                    # Any destructor other than do_cleanups disqualifies
                    # this function from easy RAII conversion.
                    if str(stmt.fndecl.name) != 'do_cleanups':
                        self.only_do_cleanups_seen = False
                    log('saw destructor %s in bb=%d, bb_from=%d, argument=%s'
                        % (str(stmt.fndecl.name), bb.index, bb_from, str(stmt.args[0])),
                        2)
                    master_cleanup.pop(curloc, stmt.args[0])
                elif needs_special_treatment(stmt.fndecl):
                    pass
                    # gcc.permerror(curloc, 'function needs special treatment')
            elif isinstance(stmt, gcc.GimpleAssign):
                # Track var-to-var copies so cleanup identity survives them.
                if isinstance(stmt.lhs, gcc.VarDecl) and isinstance(stmt.rhs[0], gcc.VarDecl):
                    master_cleanup.note_assignment(stmt.lhs, stmt.rhs[0])
            elif isinstance(stmt, gcc.GimpleReturn):
                if self.is_constructor:
                    if not master_cleanup.verify(curloc, stmt.retval):
                        gcc.permerror(curloc,
                                      'constructor does not return master cleanup')
                elif not self.is_special_constructor:
                    if not master_cleanup.isempty():
                        if curloc not in self.bad_returns:
                            gcc.permerror(curloc, 'cleanup stack is not empty at return')
                            self.bad_returns.add(curloc)
                            master_cleanup.inform()
    # Traverse a basic block, updating the master cleanup information
    # and propagating to other blocks.
    def traverse_bbs(self, edge, bb, bb_from, entry_master):
        """Depth-first propagation of ENTRY_MASTER through BB and its successors."""
        log('traverse_bbs %d from %d' % (bb.index, bb_from), 1)
        # Propagate the entry MasterCleanup though this block.
        master_cleanup = MasterCleanup(entry_master)
        self.compute_master(bb, bb_from, master_cleanup)
        modified = False
        if bb.index in self.master_cleanups:
            # Merge the newly-computed MasterCleanup into the one we
            # have already computed. If this resulted in a
            # significant change, then we need to re-propagate.
            modified = self.master_cleanups[bb.index].merge(master_cleanup)
        else:
            self.master_cleanups[bb.index] = master_cleanup
            modified = True
        # EDGE is None for the entry BB.
        if edge is not None:
            # If merging cleanups caused a change, check to see if we
            # have a bad loop.
            if edge in self.seen_edges:
                # This error doesn't really help.
                # if modified:
                #     gcc.permerror(self.guess_bb_location(bb),
                #                   'invalid cleanup use in loop')
                return
            self.seen_edges.add(edge)
        if not modified:
            return
        # Now propagate to successor nodes.
        for edge in bb.succs:
            self.traverse_bbs(edge, edge.dest, bb.index, master_cleanup)
    def check_cleanups(self):
        """Run the checker; return a short classification string for logging."""
        if not self.fun.cfg or not self.fun.decl:
            return 'ignored'
        if is_destructor(self.fun.decl):
            return 'destructor'
        if needs_special_treatment(self.fun.decl):
            return 'special'
        self.is_constructor = is_constructor(self.fun.decl)
        # "Special constructors" (name contains 'with_cleanup') are exempt
        # from the empty-stack-at-return rule.
        self.is_special_constructor = not self.is_constructor and str(self.fun.decl.name).find('with_cleanup') > -1
        # Yuck.
        if str(self.fun.decl.name) == 'gdb_xml_create_parser_and_cleanup_1':
            self.is_special_constructor = True
        if self.is_special_constructor:
            gcc.inform(self.fun.start, 'function %s is a special constructor' % (self.fun.decl.name))
        # If we only see do_cleanups calls, and this function is not
        # itself a constructor, then we can convert it easily to RAII.
        self.only_do_cleanups_seen = not self.is_constructor
        # If we ever call a constructor, then we are "cleanup-aware".
        self.cleanup_aware = False
        entry_bb = self.fun.cfg.entry
        master_cleanup = MasterCleanup()
        self.traverse_bbs(None, entry_bb, -1, master_cleanup)
        if want_raii_info and self.only_do_cleanups_seen and self.cleanup_aware:
            gcc.inform(self.fun.decl.location,
                       'function %s could be converted to RAII' % (self.fun.decl.name))
        if self.is_constructor:
            return 'constructor'
        return 'OK'
class CheckerPass(gcc.GimplePass):
    """Gimple pass that runs the cleanup checker over every function."""
    def execute(self, fun):
        # Some artificial functions have no declaration; guard on it.
        if fun.decl:
            log("Starting " + fun.decl.name)
            if show_cfg:
                dot = gccutils.cfg_to_dot(fun.cfg, fun.decl.name)
                gccutils.invoke_dot(dot, name=fun.decl.name)
        checker = CleanupChecker(fun)
        what = checker.check_cleanups()
        if fun.decl:
            log(fun.decl.name + ': ' + what, 2)
# Instantiate and register the checker pass with gcc.
ps = CheckerPass(name = 'check-cleanups')
# We need the cfg, but we want a relatively high-level Gimple.
ps.register_after('cfg')
|
unknown
|
codeparrot/codeparrot-clean
| ||
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
from __future__ import with_statement
from __future__ import absolute_import
import gc
import warnings
import numpy as np
from nipy.modalities.fmri.api import axis0_generator, FmriImageList
from nipy.core.api import parcels, Image, AffineTransform as AfT
from nipy.io.api import load_image, save_image
from nose.tools import assert_equal, assert_true
from nibabel.tmpdirs import InTemporaryDirectory
from nipy.testing import funcfile
def setup():
    """Silence all warnings for the duration of this test module."""
    warnings.simplefilter("ignore")
def teardown():
    """Drop every installed warning filter, undoing setup()."""
    warnings.resetwarnings()
def test_write():
    """Round-trip a functional image through save_image/load_image and
    check the per-frame affines of the resulting FmriImageList."""
    fname = 'myfile.nii'
    img = load_image(funcfile)
    with InTemporaryDirectory():
        save_image(img, fname)
        test = FmriImageList.from_image(load_image(fname))
        # Each list element is a 3D frame: 4x4 homogeneous affine.
        assert_equal(test[0].affine.shape, (4,4))
        # Slicing the 4D image leaves a 3-input mapping into 4D space,
        # hence a 5x4 affine — presumably; verify against nipy coordmap docs.
        assert_equal(img[0].affine.shape, (5,4))
        # Check the affine...
        A = np.identity(4)
        A[:3,:3] = img[:,:,:,0].affine[:3,:3]
        A[:3,-1] = img[:,:,:,0].affine[:3,-1]
        assert_true(np.allclose(test[0].affine, A))
        # Drop the reference before the temporary directory is removed.
        del test
def test_iter():
    """axis0_generator should yield shape[1] volumes, each with axis 1 dropped."""
    img = load_image(funcfile)
    img_shape = img.shape
    # Expected per-item shape: axis 1 removed from the data array.
    exp_shape = (img_shape[0],) + img_shape[2:]
    j = 0
    for i, d in axis0_generator(img.get_data()):
        j += 1
        assert_equal(d.shape, exp_shape)
        # Free each slice eagerly to keep memory use flat.
        del(i); gc.collect()
    assert_equal(j, img_shape[1])
def test_subcoordmap():
    """Indexing the image at a time point should fix the time output of the coordmap."""
    img = load_image(funcfile)
    subcoordmap = img[3].coordmap
    # Spatial rows of the sliced affine match the original (first input dropped).
    xform = img.affine[:,1:]
    assert_true(np.allclose(subcoordmap.affine[1:], xform[1:]))
    # First row maps to the constant coordinate of frame 3.
    assert_true(np.allclose(subcoordmap.affine[0], [0,0,0,img.coordmap([3,0,0,0])[0]]))
def test_labels1():
    """Iterating over parcels should visit every voxel of the parcel map exactly once."""
    img = load_image(funcfile)
    data = img.get_data()
    # Build an integer parcel map from the first frame's intensities.
    parcelmap = Image(img[0].get_data(), AfT('kji', 'zyx', np.eye(4)))
    parcelmap = (parcelmap.get_data() * 100).astype(np.int32)
    v = 0
    for i, d in axis0_generator(data, parcels(parcelmap)):
        v += d.shape[1]
    # Summed parcel sizes must cover the whole map.
    assert_equal(v, parcelmap.size)
|
unknown
|
codeparrot/codeparrot-clean
| ||
__author__ = 'chris hamm'
#NetworkServer_r13B
#Created: 2/19/2015
#Designed to run with NetworkClient_r13B
#Changes from previous revision:
#(Implemented) Replace the stackOfChunksThatNeedToBeReassigned with the queue of chunks
#Changed the receive-chunk-from-controller setup to store the chunk in the queue
#Changed the receive-nextChunk-request-from-client setup to send the chunk from the queue; if the queue is empty, throw a warning
#Still checks whether clients are in the stack of waiting clients
#[MIGHT NOT BE NEEDED](Not implemented yet) Add a lock for the queue of stored chunks so there is no threading conflict
#(Implemented) When the server starts up, immediately request 5 chunks from the controller
#(Implemented) Add a queue to hold excess chunks at all times; the queue is a buffer to improve speed slightly
#(Implemented) Removed old unused functions and useless comments
def compareString(inboundStringA, inboundStringB, startA, startB, endA, endB):
    """Compare inboundStringA[startA:endA] against inboundStringB[startB:endB].

    Returns True only when both ranges have the same length and identical
    characters.  Out-of-range end indices return False explicitly instead
    of relying on IndexError (the original behaviour was False either way);
    any unexpected error still returns False.
    """
    try:
        # Ranges of different lengths can never match.
        if((endA - startA) != (endB - startB)):
            return False
        # Explicit bounds check instead of letting indexing raise.
        if(endA > len(inboundStringA) or endB > len(inboundStringB)):
            return False
        for offset in range(endA - startA):
            if(inboundStringA[startA + offset] != inboundStringB[startB + offset]):
                return False
        return True
    except Exception as inst:
        # Single-argument print() is valid in both Python 2 and Python 3.
        print("========================================================================\n")
        print("Exception thrown in compareString Function: " + str(inst) + "\n")
        print("========================================================================\n")
        return False
def extractSolutionFromFoundSolutionTuple(self, inboundTuple):
    """Return the solution (second tuple element) as a string, or "" on error."""
    try:
        return str(inboundTuple[1])
    except Exception as inst:
        print("===========================================================\n")
        print("Exception thrown in extractSolutionFromFoundSolutionTuple: " + str(inst) + "\n")
        print("===========================================================\n")
        pushCommandOntoTheStackOfIOCommands(self, "EXCEPTION in extractSolutionFromFoundSolutionTuple", "Self", "Self")
        return ""  # return empty string
#Stack of IO Commands=======================================================
def pushCommandOntoTheStackOfIOCommands(self, commandName, commandOrigin_Destination, commandDirection):
    """Append an IO-command record (name, origin/destination, direction, time).

    Bug fix: the lock is now acquired *before* the try block, so the
    finally clause can never release a lock this thread does not hold
    (the original acquired inside try, and an early exception would make
    the finally's release() raise RuntimeError).
    """
    import time
    current_time = time.time()
    self.stackOfIOCommandsLock.acquire()
    try:
        # Tuple layout: (name, origin/destination, direction, timestamp).
        self.stackOfIOCommands.append((commandName, commandOrigin_Destination, commandDirection, current_time))
    except Exception as inst:
        print("======================================================\n")
        print("Exception was thrown in pushCommandOntoTheStackOfIOCommands: " + str(inst) + "\n")
        print("=======================================================\n")
        # Recursive self-logging preserved; the RLock makes re-entry safe.
        pushCommandOntoTheStackOfIOCommands(self, "EXCEPTION in pushCommandOnToTheStackOfIOCommands", "Self", "Self")
    finally:
        self.stackOfIOCommandsLock.release()
#Inbound commands from controller==========================================
def checkForDoneCommandFromController(self, inboundString):
    """Return True when the controller message starts with "done"; log it."""
    try:
        #print "Checking for done Command from the Controller\n"
        if(compareString(str(inboundString),"done",0,0,len("done"),len("done"))==True):
            #print "done Command was received from the Controller\n"
            pushCommandOntoTheStackOfIOCommands(self, "done", "Controller","Inbound" )
            return True
        else:
            return False
    except Exception as inst:
        print "========================================================================\n"
        print "Exception thrown in checkForDoneCommandFromController: "+str(inst)+"\n"
        print "========================================================================\n"
        pushCommandOntoTheStackOfIOCommands(self, "EXCEPTION in checkForDoneCommandFromCOntroller", "Controller", "Inbound")
        return False
def checkForNextChunkCommandFromController(self, inboundString):
    """Return True when the controller message starts with "nextChunk"; log it."""
    try:
        #print "Checking for nextChunk Command from the Controller\n"
        if(compareString(str(inboundString),"nextChunk",0,0,len("nextChunk"),len("nextChunk"))==True):
            #print "nextChunk Command was received from the Controller\n"
            pushCommandOntoTheStackOfIOCommands(self, "nextChunk", "Controller", "Inbound")
            return True
        else:
            return False
    except Exception as inst:
        print "========================================================================\n"
        print "Exception thrown in checkForNextChunkCommandFromController: " +str(inst)+"\n"
        print "========================================================================\n"
        pushCommandOntoTheStackOfIOCommands(self, "EXCEPTION in checkForNextChunkCommandFromController", "Controller", "Inbound")
        return False
def receiveNextChunkFromController(self):
    """Read one chunk object from the controller pipe; "" on error.

    NOTE(review): self.pipe is presumably a multiprocessing Connection,
    so recv() blocks until data arrives — confirm against the controller.
    """
    try:
        #print "Receiving Chunk From the Pipe\n"
        inboundChunk= self.pipe.recv()
        # print "Received the Chunk from the pipe\n"
        pushCommandOntoTheStackOfIOCommands(self, "nextChunk", "Controller", "Inbound")
        return inboundChunk
    except Exception as inst:
        print "========================================================================\n"
        print "ERROR in receiveNextChunkFromController: "+str(inst)+"\n"
        print "========================================================================\n"
        pushCommandOntoTheStackOfIOCommands(self, "ERROR in receiveNextChunkFromController", "Controller", "Inbound")
        return ""
#Outbound commands to controller======================================
def sendNextChunkCommandToController(self):
    """Ask the controller (via the pipe) for another chunk of work."""
    try:
        # print "Sending nextChunk Command to the Controller\n"
        self.pipe.send("nextChunk")
        #print "Sent the nextChunk Command to the Controller\n"
        pushCommandOntoTheStackOfIOCommands(self, "nextChunk", "Controller", "Outbound")
    except Exception as inst:
        print "========================================================================\n"
        print "Exception was thrown in sendNextChunkCommandToController: " +str(inst)+"\n"
        print "========================================================================\n"
        pushCommandOntoTheStackOfIOCommands(self, "EXCEPTION in sendNextChunkCommandToController", "Controller", "Outbound")
def sendDoneCommandToController(self):
    """Tell the controller (via the pipe) that this server is finished."""
    try:
        # print "Sending done Command to the Controller\n"
        self.pipe.send("done")
        # print "Sent the done Command to the Controller\n"
        pushCommandOntoTheStackOfIOCommands(self, "done", "Controller", "Outbound")
    except Exception as inst:
        print "========================================================================\n"
        print "Exception thrown in sendDoneCommandToController: "+str(inst)+"\n"
        print "========================================================================\n"
        pushCommandOntoTheStackOfIOCommands(self, "EXCEPTION in sendDoneCommandToController", "Controller", "Outbound")
def sendSolutionToController(self):
    """Send the stored solution (self.theSolution) to the controller pipe."""
    try:
        #get the solution from the class variable that stores it
        #print "Sending Solution To Controller\n"
        self.pipe.send(str(self.theSolution))
        # print "Sent solution to the controller\n"
        pushCommandOntoTheStackOfIOCommands(self, "sendSolution", "Controller", "Outbound")
    except Exception as inst:
        print "==================================================================\n"
        print "Exception thrown in sendSolutionToController: "+str(inst)+"\n"
        print "==================================================================\n"
        pushCommandOntoTheStackOfIOCommands(self, "EXCEPTION in sendSolutionToController", "Controller", "Outbound")
#Inbound commands from the client=========================================
def checkForCrashedCommandFromClient(self,inboundData): #NOTE: This is NOT modelled after the check for crash command in the previous revisions
    """Return True when the client message starts with "CRASHED"; log it."""
    try:
        # print "Checking for the Crashed Command from the Client\n"
        if(compareString(str(inboundData),"CRASHED",0,0,len("CRASHED"),len("CRASHED"))==True):
            #print "Crash Command was received from the Client\n"
            pushCommandOntoTheStackOfIOCommands(self, "CRASHED", "Client", "Inbound")
            return True
        else:
            return False
    except Exception as inst:
        print "========================================================================\n"
        print "Exception thrown in checkForCrashedCommandFromClient: " +str(inst)+"\n"
        print "========================================================================\n"
        pushCommandOntoTheStackOfIOCommands(self, "EXCEPTION in checkForCrashedCommandFromClient", "Client", "Inbound")
        return False
def checkForFoundSolutionCommandFromClient(self, inboundData):
    """Detect a "FOUNDSOLUTION [<solution>]" message from a client.

    On a match the text between the brackets is stored in
    self.theSolution and True is returned; any malformed message or
    unexpected error yields False.

    Bug fix: an explicit found-flag replaces the old
    `closingBracketPos == 15` test, which misreported an *empty*
    solution "FOUNDSOLUTION []" (closing bracket at index 15) as a
    missing closing bracket.
    """
    try:
        if(compareString(str(inboundData),"FOUNDSOLUTION",0,0,len("FOUNDSOLUTION"),len("FOUNDSOLUTION"))):
            # Message layout: "FOUNDSOLUTION [solution]".
            # inboundData[0:14] is "FOUNDSOLUTION " (including the space),
            # the opening bracket sits at index 14, so the solution text
            # begins at index 15.
            solutionStart = 15
            foundClosingBracket = False
            theInboundSolution = ""
            for index in range(solutionStart, len(inboundData)):
                if(inboundData[index] == "]"):
                    foundClosingBracket = True
                    break
                else:
                    theInboundSolution += str(inboundData[index])
            if(foundClosingBracket == False):
                raise Exception("closing bracket not found")
            pushCommandOntoTheStackOfIOCommands(self, "FOUNDSOLUTION", "Client", "Inbound")
            # Store the extracted solution on the server object.
            self.theSolution = theInboundSolution
            return True
        else:
            return False
    except Exception as inst:
        print("========================================================================\n")
        print("Exception thrown in checkForFoundSolutionCommandFromClient: " + str(inst) + "\n")
        print("========================================================================\n")
        pushCommandOntoTheStackOfIOCommands(self, "EXCEPTION in checkForFoundSOlutionCommandFromClient", "Client", "Inbound")
        return False
def checkForNextCommandFromClient(self,inboundData):
    """Return True when the client message starts with "NEXT"; log it."""
    try:
        # print "Checking for the Next command from the client\n"
        if(compareString(str(inboundData),"NEXT",0,0,len("NEXT"),len("NEXT"))):
            #print "NEXT command was received from the client\n"
            pushCommandOntoTheStackOfIOCommands(self, "NEXT", "Client", "Inbound")
            return True
        else:
            return False
    except Exception as inst:
        print "========================================================================\n"
        print "Exception was thrown in checkForNextCommandFromClient: " +str(inst)+"\n"
        print "========================================================================\n"
        pushCommandOntoTheStackOfIOCommands(self, "EXCEPTION in checkForNextCommandFromCLient", "Client", "Inbound")
        return False
def receiveCommandFromClient(self, clientSocket): #NOTE new function, used to receive normal commands
    """Poll clientSocket (0.25s timeout) for one command; "" when nothing arrives.

    Holds self.socketLock only for the duration of each recv attempt
    (the finally re-releases it every loop iteration).
    NOTE(review): if the peer closes the socket, recv() returns '' and
    this loop spins forever re-polling — confirm that is intended.
    Errors are dispatched by matching the exception *message text*
    ("[Errno 35] ..." retries, "timed out" exits quietly).
    """
    while True:
        try:
            receivedCommandFromClient = ""
            #print "Acquiring socketLock"
            self.socketLock.acquire()
            #print "Acquired socketLock"
            #print "Checking for inbound client Commands"
            clientSocket.settimeout(0.25)
            clientInput= clientSocket.recv(4096)
            if(len(clientInput) > 0):
                receivedCommandFromClient= clientInput
                break
            #return command in finally block for this function
        except Exception as inst:
            if(compareString(str(inst),"[Errno 35] Resource temporarily unavailable",0,0,len("[Errno 35] Resource temporarily unavailable"),len("[Errno 35] Resource temporarily unavailable"))==True):
                print "[Errno 35] Resource is not available in receiveCommandFromClient, trying again.\n"
            elif(compareString(str(inst),"timed out",0,0,len("timed out"),len("timed out"))==True):
                #ignore, do no print out error
                break
            else:
                print "===================================================================\n"
                print "ERROR in receiveCommandFromClient: " +str(inst)+"\n"
                print "===================================================================\n"
                pushCommandOntoTheStackOfIOCommands(self, "ERROR in receiveCommandFromClient", "Client", "Inbound")
                receivedCommandFromClient= ""#set to empty string
                break
        finally:
            #print "Releasing socketLock\n"
            self.socketLock.release()
            #print "Released socketLock\n"
    return receivedCommandFromClient
#Outbound commands to client==================================================
def sendDoneCommandToClient(self,networkSocket, clientIP):
    """Tell one client the computation is finished by sending "done".

    NOTE(review): settimeout() runs after acquire() but before the try,
    so if it raises, the lock is never released — confirm acceptable.
    Broken-pipe errors are distinguished by matching the message text.
    """
    #print "Issuing Done Command to Client: " + str(clientIP) +"\n"
    #print "Acquiring socket lock\n"
    self.socketLock.acquire()
    #print "Acquired socketLock\n"
    networkSocket.settimeout(0.25)
    #print "socket lock acquired\n"
    try: #send try block
        # print "preparing to send done command to client\n"
        networkSocket.send("done")
        #print "sent Done command to client: " +str(clientIP) +"\n"
        pushCommandOntoTheStackOfIOCommands(self, "done", "Client", "Outbound")
    except Exception as inst:
        if(compareString(str(inst),"[Errno 32] Broken pipe",0,0,len("[Errno 32] Broken pipe"),len("[Errno 32] Broken pipe"))):
            print "========================================================================\n"
            print "Exception thrown in sendDoneCommandToClient: Broken pipe error detected in send try block\n"
            print "========================================================================\n"
            pushCommandOntoTheStackOfIOCommands(self, "EXCEPTION in sendDoneCommandToClient: Broken Pipe", "Client", "Outbound")
        else:
            print "========================================================================\n"
            print "Exception in send Done command: " +str(inst) +"\n"
            print "========================================================================\n"
            pushCommandOntoTheStackOfIOCommands(self, "EXCEPTION in sendDoneCommandToClient", "Client", "Outbound")
    finally:
        #print "Releasing the socketLock\n"
        self.socketLock.release()
        #print "Released socketLock\n"
def sendNextCommandToClientByLength(self, clientSocket, chunkObject): #This sends the measurements to the client in length instead of file size
    """Send a chunk to a client: a NEXT header with the string lengths of
    chunkObject.params and chunkObject.data, then the two payloads.

    The whole exchange happens under self.socketLock.  Each payload send
    retries forever on "timed out"; other errors abort that payload.
    """
    try:
        #print "Acquiring the socketLock\n"
        self.socketLock.acquire()
        #print "Acquired the socketLock\n"
        chunkParamLength = len(str(chunkObject.params))
        chunkDataLength = len(str(chunkObject.data))
        #Create the command string
        commandString= ""
        try:
            commandString = "NEXT PSIZE("+str(chunkParamLength)+") DSIZE("+str(chunkDataLength)+")\n" #keeping same names, even though it is length
        except Exception as inst:
            print "========================================================================\n"
            print "Error in create command string step of sendNextCommandToCLientByLength: "+str(inst)+"\n"
            print "========================================================================\n"
            pushCommandOntoTheStackOfIOCommands(self, "ERROR in createCommandStringStep of sendNextCommandToClientByLength", "Client", "Outbound")
        #Send command string to the client
        try:
            # print "Sending command string to the client\n"
            clientSocket.send(commandString)
            import time
            # Give the client a moment to parse the header before the payload.
            time.sleep(0.25)
            # print "Sent the command string to the client\n"
        except Exception as inst:
            print "========================================================================\n"
            print "Error in send command string to client in sendNextCOmmandToCLientByLength: "+str(inst)+"\n"
            print "========================================================================\n"
            pushCommandOntoTheStackOfIOCommands(self, "ERROR in sendCommandStringToClient of sendNextCommandToClientByLength", "Client", "Outbound")
        #Send the chunk params to the client
        try:
            # print "Sending chunk params to the client\n"
            while True:
                try:
                    clientSocket.send(str(chunkObject.params))
                    # print "Sent chunk params to the client\n"
                    pushCommandOntoTheStackOfIOCommands(self, "next: chunk.params", "Client", "Outbound")
                    break
                except Exception as inst:
                    if(compareString(str(inst),"timed out",0,0,len("timed out"),len("timed out"))==True):
                        #dont throw an error, just try again
                        fakeVar=True
                    else:
                        raise Exception ("Error in sending chunk params to the client in infinite while loop")
                        # NOTE(review): this break is unreachable after the raise.
                        break
        except Exception as inst:
            print "========================================================================\n"
            print "Error in send chunk params to the client in sendNextCOmmandToClientByLength: "+str(inst)+"\n"
            print "========================================================================\n"
            pushCommandOntoTheStackOfIOCommands(self, "ERROR in sendChunkParamsToClient of sendNextCommandToClientByLength", "Client", "Outbound")
        #send the chunk data to the client
        try:
            # print "Sending chunk data to the client\n"
            while True:
                try:
                    clientSocket.send(str(chunkObject.data))
                    #print "Sent chunk data to the client\n"
                    pushCommandOntoTheStackOfIOCommands(self, "next: chunk.data", "Client", "Outbound")
                    break
                except Exception as inst:
                    if(compareString(str(inst),"timed out",0,0,len("timed out"),len("timed out"))==True):
                        #dont throw error, just try again
                        fakeVar=True
                    else:
                        raise Exception ("Error in sending chunk data to the client in infinite loop")
                        # NOTE(review): this break is unreachable after the raise.
                        break
        except Exception as inst:
            print "========================================================================\n"
            print "Error in send chunk data to the client in sendNextCOmmandToClientByLength: "+str(inst)+"\n"
            print "========================================================================\n"
            pushCommandOntoTheStackOfIOCommands(self, "ERROR in sendChunkDataToClient of sendNextCommandToClientByLength", "Client", "Outbound")
    except Exception as inst:
        print "========================================================================\n"
        print "ERROR in sendNextCommandToClientByLength: "+str(inst)+"\n"
        print "========================================================================\n"
        pushCommandOntoTheStackOfIOCommands(self, "ERROR in sendNextCommandToClientByLength", "Client", "Outbound")
    finally:
        #print "Releasing the socketLock\n"
        self.socketLock.release()
        #print "Released the socketLock\n"
#dictionaryOfCurrentClientTasks functions================================================================
def addClientToDictionaryOfCurrentClientTasks(self, clientAddress, clientChunk): #client Address has both the ip address and port
    """Map a client's (IP, port) address to the chunk it is working on."""
    try:
        self.dictionaryOfCurrentClientTasks[clientAddress] = clientChunk
    except Exception as inst:
        print("========================================================================\n")
        print("ERROR in addClientToDictionaryOfCurrentClientTasks: " + str(inst) + "\n")
        print("========================================================================\n")
        pushCommandOntoTheStackOfIOCommands(self, "ERROR in addClientToDictionaryOfCurrentClientTasks", "Self", "Self")
def delClientFromDictionaryOfCurrentClientTasks(self, clientAddress): #clientAddress contains IP and port
    """Remove a client's (IP, port) entry from the current-task map."""
    try:
        # pop() raises KeyError for a missing key, just like del.
        self.dictionaryOfCurrentClientTasks.pop(clientAddress)
    except KeyError as inst:
        print("========================================================================\n")
        print("ERROR: " + str(clientAddress) + " does not exist in the dictionaryOfCurrentClientTasks\n")
        print("========================================================================\n")
        pushCommandOntoTheStackOfIOCommands(self, "ERROR (Key Error) in delClientFromDictionaryOfCurrentClientTasks", "Self", "Self")
    except Exception as inst:
        print("========================================================================\n")
        print("ERROR in delClientFromDictionaryOfCurrentClientTasks: " + str(inst) + "\n")
        print("========================================================================\n")
        pushCommandOntoTheStackOfIOCommands(self, "ERROR in delClientFromDictionaryOfCurrentClientTasks", "Self", "Self")
def getChunkFromDictionaryOfCurrentClientTasks(self, clientAddress): #clientAddress contains IP and port
    """Return the chunk assigned to clientAddress, or "" when not found."""
    try:
        return self.dictionaryOfCurrentClientTasks[clientAddress]
    except KeyError as inst:
        print("========================================================================\n")
        print("ERROR: " + str(clientAddress) + " does not exist in the dictionaryOfCurrentClientTasks\n")
        print("========================================================================\n")
        pushCommandOntoTheStackOfIOCommands(self, "ERROR (Key Error) in getClientFromDictionaryOfCurrentClientTasks", "Self", "Self")
        return ""  # changed from none
    except Exception as inst:
        print("========================================================================\n")
        print("ERROR in getChunkFromDictionaryOfCurrentClientTasks: " + str(inst) + "\n")
        print("========================================================================\n")
        pushCommandOntoTheStackOfIOCommands(self, "ERROR in getClientFromDictionaryOfCurrentClientTasks", "Self", "Self")
        return ""  # changed from none
def setChunkToDictionaryOfCurrentClientTasks(self, clientAddr, chunkObject):
    """Overwrite (or create) the chunk entry for clientAddr."""
    try:
        self.dictionaryOfCurrentClientTasks[clientAddr] = chunkObject
    except Exception as inst:
        print("=======================================================================\n")
        print("ERROR in setChunkToDIctionaryOfCurrentCLientTasks: " + str(inst) + "\n")
        print("=======================================================================\n")
        pushCommandOntoTheStackOfIOCommands(self, "ERROR in setClientFromDictionaryOfCurrentClientTasks", "Self", "Self")
#list of Crashed clients functions====================================================================
def addClientToListOfCrashedClients(self, clientAddress): #clientAddress has the ip and the port
    """Record a crashed client's (IP, port) address."""
    try:
        self.listOfCrashedClients.append(clientAddress)
    except Exception as inst:
        print("========================================================================\n")
        print("ERROR in addClientToListOfCrashedClients: " + str(inst) + "\n")
        print("========================================================================\n")
        pushCommandOntoTheStackOfIOCommands(self, "ERROR in addClientToListOfCrashedClients", "Self", "Self")
#stackOfChunksThatNeedToBeReassigned functions==========================================================
def pushChunkOnToStackOfChunksThatNeedToBeReassigned(self, inboundChunk):
    """Queue a chunk whose client died so it can be handed to another client."""
    try:
        self.stackOfChunksThatNeedToBeReassigned.append(inboundChunk)
    except Exception as inst:
        print("========================================================================\n")
        print("ERROR in pushChunkOnToStackOfChunksThatNeedToBeReassigned: " + str(inst) + "\n")
        print("========================================================================\n")
        pushCommandOntoTheStackOfIOCommands(self, "ERROR in pushChunksOnToStackOfChunksThatNeedToBeReassigned", "Self", "Self")
def popChunkFromStackOfChunksThatNeedToBeReassigned(self):
    """Pop and return the most recent chunk awaiting reassignment; "" on failure."""
    try:
        return self.stackOfChunksThatNeedToBeReassigned.pop()
    except Exception as inst:
        print("========================================================================\n")
        print("ERROR in popChunkFromStackOfChunksThatNeedToBeReassigned: " + str(inst) + "\n")
        print("========================================================================\n")
        pushCommandOntoTheStackOfIOCommands(self, "ERROR in popChunkFromStackOfChunksThatNeedToBeReassigned", "Self", "Self")
        return ""  # changed from none
#stackOfClientsWaitingForNextChunk functions============================================================
def pushClientOnToStackOfClientsWaitingForNextChunk(self, clientSocket, clientAddress):
    """Queue a (socket, address) tuple for a client waiting on its next chunk."""
    try:
        self.stackOfClientsWaitingForNextChunk.append((clientSocket, clientAddress))  # holds a tuple
    except Exception as inst:
        print("========================================================================\n")
        print("ERROR in pushClientOnToStackOfClientsWaitingForNextChunk: " + str(inst) + "\n")
        print("========================================================================\n")
        pushCommandOntoTheStackOfIOCommands(self, "ERROR in pushClientOnToStackOfClientsWaitingForNextChunk", "Self", "Self")
def popClientFromStackOfClientsWaitingForNextChunk(self):
    """Pop and return the most recently queued (socket, address) tuple; "" on failure."""
    try:
        return self.stackOfClientsWaitingForNextChunk.pop()
    except Exception as inst:
        print("========================================================================\n")
        print("ERROR in popClientFromStackOfClientsWaitingForNextChunk: " + str(inst) + "\n")
        print("========================================================================\n")
        pushCommandOntoTheStackOfIOCommands(self, "ERROR in popClientFromStackOfClientsWaitingForNextChunk", "Self", "Self")
        return ""  # changed from none
import threading
import thread
from socket import *
import platform
import Queue
class NetworkServer():
    """Threaded TCP work-distribution server (Python 2).

    Accepts client connections, hands out work "chunks" received from a
    controller process over a pipe, and relays status back.  One thread per
    connected client runs ClientThreadHandler; the constructor itself binds
    the listening socket and then runs the accept/controller loop until a
    solution is found or shutdown is requested.

    NOTE(review): every attribute below is class-level, so all
    NetworkServer instances share the same queues/stacks/dictionaries.
    NOTE(review): helpers such as sendDoneCommandToClient, compareString,
    receiveCommandFromClient and pushCommandOntoTheStackOfIOCommands are
    module-level functions defined elsewhere in this file.
    """
    #CLASS VARS
    host = ''
    port = 55568
    myIPAddress = '127.0.0.1' #default to ping back address
    stopAllThreads = False #set to true to have all threads break out of their while loops
    listOfCrashedClients = []
    theSolution = "" #holds the solution if found
    stackOfIOCommands = [] #holds a record all the IO commands that have been sent through server
    stackOfChunksThatNeedToBeReassigned = [] #THIS CONTAINER IS TO BE REPLACED BY THE QUEUE OF STORED CHUNKS
    queueOfStoredChunks = Queue.Queue() #This is the replacement for the staockOFChunkS that need to be reassigned
    stackOfClientsWaitingForNextChunk = []
    dictionaryOfCurrentClientTasks = {} #key is the client's IP Address , the value is the chunk that client is working on
    #If you try to access a non-existing key it will throw an error
    socketLock = threading.RLock()
    #START OF CLIENT THREAD HANDLER
    def ClientThreadHandler(self, clientSocket, clientAddr, socketLock):
        """Per-client service loop, run on its own thread.

        Repeatedly reads a command from the client and dispatches on it:
        "next" hands out a chunk (or queues the client and asks the
        controller for more), "found solution" triggers global shutdown,
        and "crashed" re-queues the client's chunk for reassignment.
        Exits when stopAllThreads is set; always closes clientSocket.
        """
        try: #CLient THread Handler Try Block
            inboundCommandFromClient = "" #initialize the receiving variable
            while True:
                if(self.stopAllThreads == True):
                    #print "MAIN THREAD: Stopping the thread\n"
                    # print "Sending done command to connected client\n"
                    sendDoneCommandToClient(self, clientSocket, clientAddr)
                    break
                try: #check for commands from client
                    inboundCommandFromClient = receiveCommandFromClient(self,clientSocket)
                except Exception as inst:
                    print "===================================================================\n"
                    print "Error in check for commands from the client in client thread handler: " +str(inst)+"\n"
                    print "===================================================================\n"
                    pushCommandOntoTheStackOfIOCommands(self, "ERROR in checkForCommandsFromTheClientInClientThreadHeader", "Self", "Self")
                try: #Analyzing received command from the client try block
                    if(len(inboundCommandFromClient) > 0): #ignore if the empty string
                        identifiedCommand = False
                        try: #checking to see if the next Command was received from the client try block
                            if(checkForNextCommandFromClient(self,inboundCommandFromClient)==True):
                                identifiedCommand= True
                                # NOTE(review): qsize() followed by get() is racy if several
                                # handler threads drain the queue concurrently — confirm.
                                if(self.queueOfStoredChunks.qsize() > 0):
                                    #import Chunk
                                    #tempChunk = Chunk.Chunk()
                                    tempChunk = self.queueOfStoredChunks.get()
                                    #tempChunk.params = tempChunk2.params
                                    #tempChunk.data = tempChunk2.data
                                    sendNextCommandToClientByLength(self, clientSocket, tempChunk)
                                    try:
                                        testChunk = getChunkFromDictionaryOfCurrentClientTasks(self, clientAddr)
                                        #if succeeds, , then set value
                                        setChunkToDictionaryOfCurrentClientTasks(self, clientAddr, tempChunk)
                                    except Exception as inst:
                                        #add client to the dictionary
                                        addClientToDictionaryOfCurrentClientTasks(self, clientAddr, tempChunk)
                                    #then request nextchunk from controller
                                    sendNextChunkCommandToController(self)
                                else:
                                    #put client in stack of clients waiting
                                    pushClientOnToStackOfClientsWaitingForNextChunk(self, clientSocket, clientAddr)
                                    #request nextchunk from controller
                                    sendNextChunkCommandToController(self)
                                    print "==================================================\n"
                                    print "WARNING: The queueOfStoredChunks is empty!!!!"
                                    print "==================================================\n"
                                    pushCommandOntoTheStackOfIOCommands(self, "WARNING: QueueOfSToredChunks is Empty!", "Self", "Self")
                                ''' (Now checks the queue)
                                #print "Identified inboundCommandFromClient as the Next Command\n"
                                #check to see if there is a chunk that needs to be reassigned
                                if(len(self.stackOfChunksThatNeedToBeReassigned) > 0):
                                    #print "There is a chunk that needs to be reassigned."
                                    tempChunk = popChunkFromStackOfChunksThatNeedToBeReassigned(self)
                                    sendNextCommandToClientByLength(self, clientSocket, tempChunk)
                                    try:
                                        tempChunk = getChunkFromDictionaryOfCurrentClientTasks(self,clientAddr)
                                        #if suceed, set value
                                        setChunkToDictionaryOfCurrentClientTasks(self,clientAddr,tempChunk)
                                    except Exception as inst:
                                        #add client to the dictionary
                                        addClientToDictionaryOfCurrentClientTasks(self,clientAddr,tempChunk)
                                else:
                                    #print "There is no chunk that needs to be reassigned. Requesting nextChunk from the Controller"
                                    sendNextChunkCommandToController(self)
                                    #print "Adding the client to the stackOfClientsWaitingForNextChunk"
                                    pushClientOnToStackOfClientsWaitingForNextChunk(self,clientSocket, clientAddr) '''
                        except Exception as inst:
                            print "===================================================================\n"
                            print "Error in checking to see if the next Command was received from the client in client thread handler: "+str(inst)+"\n"
                            print "===================================================================\n"
                            pushCommandOntoTheStackOfIOCommands(self, "ERROR in checkingForNextCommandFromClient", "Self", "Self")
                        try: #check to see if the found solution command was received from the client
                            if(identifiedCommand == False):
                                if(checkForFoundSolutionCommandFromClient(self,inboundCommandFromClient)==True):
                                    identifiedCommand= True
                                    # print "Identified inboundCommandFromClient as the found solution command\n"
                                    # NOTE(review): this passes THIS thread's clientSocket for every
                                    # key in the dictionary — presumably it should target each
                                    # client's own socket; confirm against sendDoneCommandToClient.
                                    for key in self.dictionaryOfCurrentClientTasks.keys():
                                        sendDoneCommandToClient(self,clientSocket, key) #extracts the key from the dictionary and sends the done command to them
                                    # print "Setting the thread termination value to true, stopping all threads\n"
                                    # print "Acquiring stopAllThreads Lock\n"
                                    self.stopAllThreadsLock.acquire()
                                    # print "Acquired stopAllThreads Lock\n"
                                    self.stopAllThreads = True
                                    #print "Releasing stopAllThreads Lock\n"
                                    self.stopAllThreadsLock.release()
                                    # print "Released stopAllThreads Lock\n"
                                    # print "A client has found the solution!!!!!\n"
                                    break
                        except Exception as inst:
                            print "===================================================================\n"
                            print "Error in check to see if found solution command was received from the client in client thread handler: "+str(inst)+"\n"
                            print "===================================================================\n"
                            pushCommandOntoTheStackOfIOCommands(self, "ERROR in checkForFoundSOlutionCommandFromClient", "Self", "Self")
                        try: #check to see if the crashed command was received
                            if(identifiedCommand == False):
                                if(checkForCrashedCommandFromClient(self,inboundCommandFromClient)==True):
                                    identifiedCommand= True
                                    # print "Identified inboundCommandFromClient as the Crashed Command\n"
                                    tempChunk = getChunkFromDictionaryOfCurrentClientTasks(self,clientAddr)
                                    pushChunkOnToStackOfChunksThatNeedToBeReassigned(self,tempChunk)
                                    addClientToListOfCrashedClients(self, clientAddr)
                                    delClientFromDictionaryOfCurrentClientTasks(self,clientAddr)
                        except Exception as inst:
                            print "===================================================================\n"
                            print "Error in check to see if crashed command was received from client in client thread handler: "+ str(inst)+"\n"
                            print "===================================================================\n"
                            pushCommandOntoTheStackOfIOCommands(self, "ERROR in checkForCrashedCommandFromClient", "Self", "Self")
                        if(identifiedCommand == False):
                            #print "Warning: Unknown Command Received from the client: "+str(inboundCommandFromClient)+"\n"
                            pushCommandOntoTheStackOfIOCommands(self, "UNKNOWN: "+str(inboundCommandFromClient), "Client", "Inbound")
                except Exception as inst:
                    print "===================================================================\n"
                    print "Error in Analyzing received command from the client try block in the client thread handler: " +str(inst)+"\n"
                    print "===================================================================\n"
                    pushCommandOntoTheStackOfIOCommands(self, "ERROR in Analyzing received command from the Client", "Self", "Self")
        except Exception as inst:
            print "===================================================================\n"
            print "Error in Client Thread Handler: " + str(inst) +"\n"
            print "===================================================================\n"
            pushCommandOntoTheStackOfIOCommands(self, "ERROR in client Thread Handler", "Self", "Self")
        finally:
            clientSocket.close()
            #print "clientSocket has been closed\n"
            #print "this thread has closed.\n"
    #end of clientthreadhandler
    #START OF INITIAL SERVER SETUP
    def __init__(self, inboundpipeconnection):
        """Set up the server and run its main loop until shutdown.

        :param inboundpipeconnection: pipe connection to the controller process

        Binds the listening socket, prints OS/IP diagnostics, pre-requests
        five chunks from the controller, then loops accepting clients
        (spawning ClientThreadHandler threads) and servicing controller
        commands from the pipe.  The finally block performs shutdown:
        stops all threads, notifies clients/controller, dumps the IO
        command log, and prints the solution.
        """
        #CLASS VARS
        self.pipe = inboundpipeconnection #pipe that connects to the controller
        self.stopAllThreadsLock = thread.allocate_lock()
        self.stackOfIOCommandsLock = thread.allocate_lock()
        #CREATE THE SOCKET
        import socket
        serverSocket = socket.socket(AF_INET, SOCK_STREAM)
        #detect the OS
        try: #getOS try block
            print "*************************************"
            print " Network Server"
            print "*************************************"
            print "OS DETECTION:"
            if(platform.system()=="Windows"): #Detecting Windows
                print platform.system()
                print platform.win32_ver()
            elif(platform.system()=="Linux"): #Detecting Linux
                print platform.system()
                print platform.dist()
            elif(platform.system()=="Darwin"): #Detecting OSX
                print platform.system()
                print platform.mac_ver()
            else: #Detecting an OS that is not listed
                print platform.system()
                print platform.version()
                print platform.release()
            print "*************************************"
        except Exception as inst:
            print "========================================================================================"
            print "ERROR: An exception was thrown in getOS try block"
            print type(inst) #the exception instance
            print inst.args #srguments stored in .args
            print inst #_str_ allows args tto be printed directly
            print "========================================================================================"
            pushCommandOntoTheStackOfIOCommands(self, "ERROR in getOS try block", "Self", "Self")
        #get the IP address
        try: #getIP tryblock
            # print "STATUS: Getting your network IP adddress"
            if(platform.system()=="Windows"):
                print socket.gethostbyname(socket.gethostname())
            elif(platform.system()=="Linux"):
                #Source: http://stackoverflow.com/questions/11735821/python-get-localhost-ip
                #Claims that this works on linux and windows machines
                import fcntl
                import struct
                import os
                def get_interface_ip(ifname):
                    # SIOCGIFADDR (0x8915) ioctl: ask the kernel for the
                    # IPv4 address bound to the named interface.
                    s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
                    return socket.inet_ntoa(fcntl.ioctl(s.fileno(), 0x8915, struct.pack('256s',ifname[:15]))[20:24])
                #end of def
                def get_lan_ip():
                    # Fall back to probing common interface names when the
                    # hostname only resolves to loopback.
                    ip = socket.gethostbyname(socket.gethostname())
                    if ip.startswith("127.") and os.name != "nt":
                        interfaces = ["eth0","eth1","eth2","wlan0","wlan1","wifi0","ath0","ath1","ppp0"]
                        for ifname in interfaces:
                            try:
                                ip = get_interface_ip(ifname)
                                print "IP address was retrieved from the " + str(ifname) + " interface."
                                break
                            except IOError:
                                pass
                    return ip
                #end of def
                print get_lan_ip()
            elif(platform.system()=="Darwin"):
                print socket.gethostbyname(socket.gethostname())
            else:
                #NOTE: MAY REMOVE THIS AND REPLACE WITH THE LINUX DETECTION METHOD
                # print "INFO: The system has detected that you are not running Windows, OS X, or Linux."
                # print "INFO: System is using a generic IP detection method"
                print socket.gethostbyname(socket.gethostname())
        except Exception as inst:
            print "========================================================================================"
            print "ERROR: An exception was thrown in getIP try block"
            print type(inst) #the exception instance
            print inst.args #srguments stored in .args
            print inst #_str_ allows args tto be printed directly
            print "========================================================================================"
            pushCommandOntoTheStackOfIOCommands(self, "ERROR in getIP try block", "Self", "Self")
        try: #try to bind the socket
            serverSocket.bind((self.host, self.port))
        except Exception as inst:
            print "===================================================================\n"
            print "Critical Error: Failed to bind the socket: "+str(inst)+"\n"
            print "Suggestion: Close this application, then reopen this application and try again\n"
            print "===================================================================\n"
        #START LISTENING TO SOCKET
        serverSocket.listen(5)
        #MAKE INITIAL REQUEST OF CHUNKS TO CONTROLLER FOR THE QUEUE
        #Initially requesting 5 chunks
        sendNextChunkCommandToController(self)
        sendNextChunkCommandToController(self)
        sendNextChunkCommandToController(self)
        sendNextChunkCommandToController(self)
        sendNextChunkCommandToController(self)
        #MAIN THREAD SERVER LOOP
        try: #main thread server loop try block
            # Short accept timeout so the loop can also poll the pipe and the
            # stop flag instead of blocking forever in accept().
            serverSocket.settimeout(0.25)
            # print "MAIN THREAD: Waiting for client(s) to connect\n"
            while True: #Primary main thread server while loop
                if(self.stopAllThreads == True):
                    #print "MAIN THREAD: Stopping Main Thread\n"
                    break
                #CHECK TO SEE IF A CLIENT IS TRYING TO CONNECT
                try:
                    #print "MAIN THREAD: Checking to see if client is trying to connect\n"
                    inboundClientSocket, inboundClientAddr = serverSocket.accept()
                    #print "MAIN THREAD: A client has connected!!\n"
                    thread.start_new_thread(self.ClientThreadHandler, (inboundClientSocket,inboundClientAddr,self.socketLock))
                except Exception as inst:
                    # Accept timeouts are expected every 0.25s; only report
                    # other exceptions.
                    if(compareString(str(inst),"timed out",0,0,len("timed out"),len("timed out"))==True):
                        #do not display an error message
                        fakeVar= True
                    else:
                        print "===================================================================\n"
                        print "MAIN THREAD: Error in check for client trying to connect try block: " +str(inst)+"\n"
                        print "===================================================================\n"
                        pushCommandOntoTheStackOfIOCommands(self, "Main Thread ERROR in checkForClientTryingToConnect" , "Self", "Self")
                #CHECK TO SEE IF CONTROLLER HAS SENT A MESSAGE TO SERVER
                try:
                    # print "MAIN THREAD: Checking for Commands from the controller\n"
                    if(self.pipe.poll()):
                        receivedControllerCommand= self.pipe.recv()
                        if(receivedControllerCommand is not None): #ignore the empty string
                            #print "MAIN THREAD: Received command from the controller\n"
                            identifiedCommand = False
                            try: #checking for nextChunk Command from Controller
                                if(checkForNextChunkCommandFromController(self,receivedControllerCommand)==True):
                                    identifiedCommand= True
                                    if(len(self.stackOfClientsWaitingForNextChunk) > 0):
                                        # NOTE(review): the pop helper returns "" on failure, which
                                        # would make this tuple unpack raise ValueError — confirm.
                                        tempClientSocket, tempClientAddr = popClientFromStackOfClientsWaitingForNextChunk(self)
                                        #send straight to client
                                        outboundChunk = receiveNextChunkFromController(self)
                                        sendNextCommandToClientByLength(self, tempClientSocket, outboundChunk)
                                        sendNextChunkCommandToController(self)
                                    else:
                                        self.queueOfStoredChunks.put(receiveNextChunkFromController(self)) #put into the queue [NOT USING THE BLOCKING FEATURE]
                                    # print "MAIN THREAD: Identified receivedControllerCommand as the nextChunk Command\n"
                                    ''' (Needs to just add the chunk to the queue)
                                    #check to see if a client is waiting for the nextChunk
                                    if(len(self.stackOfClientsWaitingForNextChunk) > 0):
                                        # print "MAIN THREAD: A client is waiting for the nextChunk\n"
                                        tempClientSocket, tempClientAddress= popClientFromStackOfClientsWaitingForNextChunk(self)
                                        outboundChunk = receiveNextChunkFromController(self)
                                        sendNextCommandToClientByLength(self, tempClientSocket, outboundChunk)
                                        try:
                                            tempChunk = getChunkFromDictionaryOfCurrentClientTasks(self,tempClientAddress)
                                            #if, suceeds, override the old chunk
                                            setChunkToDictionaryOfCurrentClientTasks(self,tempClientAddress,outboundChunk)
                                        except Exception as inst:
                                            #add it if there is not key for that client yet
                                            addClientToDictionaryOfCurrentClientTasks(self,tempClientAddress, outboundChunk)
                                    else: #if there is no client waiting for the next chunk
                                        #print "MAIN THREAD: No clients are waiting for the nextChunk. Adding chunk to the stackOfChunksThatNeedToBeReassigned"
                                        pushChunkOnToStackOfChunksThatNeedToBeReassigned(self,receivedControllerCommand)'''
                            except Exception as inst:
                                print "===================================================================\n"
                                print "MAIN THREAD: Error in checking for nextChunk Command from Controller Try Block: " +str(inst)+"\n"
                                print "===================================================================\n"
                                pushCommandOntoTheStackOfIOCommands(self, "Main Thread ERROR in checkingForNextChunkCommand from Controller", "Self", "Self")
                            try: #checking for done command form controller
                                if(identifiedCommand == False):
                                    if(checkForDoneCommandFromController(self,receivedControllerCommand)==True):
                                        identifiedCommand= True
                                        # print "MAIN THREAD: Identified receivedControllerCommand as the Done Command\n"
                                        #No further actions are needed for this command
                            except Exception as inst:
                                print "===================================================================\n"
                                print "MAIN THREAD: Error in checking for done command from Controller Try Block: "+str(inst)+"\n"
                                print "===================================================================\n"
                                pushCommandOntoTheStackOfIOCommands(self, "Main Thread ERROR in checkingForDoneCommand from COntroller", "Self", "Self")
                            if(identifiedCommand == False):
                                # print "MAIN THREAD: Warning: Unknown Command Received from the Controller: "+str(receivedControllerCommand)+"\n"
                                pushCommandOntoTheStackOfIOCommands(self, "UNKNOWN: "+str(receivedControllerCommand), "Controller", "Inbound")
                        else: #if there is nothing on the pipe
                            #Do not display the message
                            fakeVar=True
                            # print "MAIN THREAD: There is no command received from the controller\n"
                except Exception as inst:
                    if(compareString(str(inst),"timed out",0,0,len("timed out"),len("timed out"))==True):
                        #Do not print out an error message
                        fakeVar= True
                    else:
                        print "===================================================================\n"
                        print "MAIN THREAD: Error in check to see if controller has sent a message to server try block: " + str(inst) +"\n"
                        print "===================================================================\n"
                        pushCommandOntoTheStackOfIOCommands(self, "Main Thread ERROR in checkToSeeIfControllerHasSentAMessage", "Self", "Self")
        except Exception as inst:
            print "===================================================================\n"
            print "MAIN THREAD: Error in Main Thread Server Loop: " +str(inst)+"\n"
            print "===================================================================\n"
            pushCommandOntoTheStackOfIOCommands(self, "Main Thread ERROR in Main Thread Server Loop", "Self", "Self")
        finally:
            #print "Setting stop variable to stop all threads"
            #print "Acquiring stopAllThreads Lock\n"
            self.stopAllThreadsLock.acquire()
            #print "Acquired stopAllThreads Lock\n"
            self.stopAllThreads = True
            #print "Releasing stopAllThreads Lock\n"
            self.stopAllThreadsLock.release()
            #print "Released stopAllThreads Lock\n"
            #print "Sending done command to all clients, server is finished\n"
            serverSocket.settimeout(0.25)
            # NOTE(review): this sends on the LISTENING socket, not on each
            # client's connected socket — sendall on a listening socket
            # cannot reach the clients keyed below; confirm intended target.
            for key in self.dictionaryOfCurrentClientTasks.keys(): #This is potentially replaced by the sendDoneCommand in thread
                try:
                    self.socketLock.acquire()
                    serverSocket.sendall("done")
                    self.socketLock.release()
                    # print "Sent done command to: " + str(key)+"\n"
                except Exception as inst:
                    if(compareString(str(inst),"timed out",0,0,len("timed out"),len("timed out"))==True):
                        #print "Timed out while sending 'done' command to "+ str(key)+"\n"
                        fakeVar=True
                    else:
                        print "===========================================================\n"
                        print "MAIN THREAD ERROR in finally block send done command to clients: " +str(inst)+"\n"
                        print "============================================================\n"
                        pushCommandOntoTheStackOfIOCommands(self, "Main Thread ERROR in finally block sendDoneCommand", "Self", "Self")
            #print "MAIN THREAD: Preparing to close the socket\n"
            serverSocket.close()
            # print "MAIN THREAD: The serverSocket has been closed\n"
            sendDoneCommandToController(self)
            # print "MAIN THREAD: Informed the Controller that Server has finished\n"
            sendSolutionToController(self) #solution is saved in the class variable
            print "-----------------------Stack of IO Commands---------------------------------\n"
            # Drain the IO command log front-to-back and pretty-print each entry.
            for index in range(0,len(self.stackOfIOCommands)):
                tempCommandName, tempOrigin_Destination, tempCommandDirection, tempTime = self.stackOfIOCommands.pop(0)
                if(compareString(tempCommandDirection, "Inbound",0,0,len("Inbound"),len("Inbound"))==True):
                    print str(tempCommandDirection)+" command: "+str(tempCommandName)+" was received from: "+str(tempOrigin_Destination)+" at: "+str(tempTime)
                else: #if outbound
                    print str(tempCommandDirection)+" command: "+str(tempCommandName)+" was sent to: "+str(tempOrigin_Destination)+" at: "+str(tempTime)
            print "-----------------------End of Stack of IO Commands------------------------\n"
            print "The Solution is: '"+str(self.theSolution)+"'\n"
|
unknown
|
codeparrot/codeparrot-clean
| ||
from kivy.uix.floatlayout import FloatLayout
from kivy.properties import NumericProperty, ObjectProperty, BoundedNumericProperty, ListProperty
from .node import Node
from math import sqrt
class HexCanvas(FloatLayout):
    """Canvas holding a hexagonal grid of Node widgets.

    Builds a row_count x column_count grid on construction; odd rows are
    shifted half a step horizontally to produce the hex packing.
    """
    last_node = ObjectProperty(None, allownone=True)
    grid = ObjectProperty([])
    row_count = BoundedNumericProperty(11, min=0, max=11)
    column_count = BoundedNumericProperty(22, min=0, max=22)
    vvhelix_id = NumericProperty(0)
    scaffold_path = ListProperty([])

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # BUGFIX: rebind `grid` to a fresh list before building. The
        # ObjectProperty default above is a single class-level list, and
        # __construct appends to self.grid — without this rebind every
        # instance would mutate (and accumulate rows in) the shared default.
        self.grid = []
        self.__construct()

    def __construct(self):
        """Create the Node widgets and fill self.grid row by row."""
        x_start, y_start = 30, 30
        a = 60                       # horizontal step between node centres
        x_offset = a / 2             # half-step shift applied to odd rows
        y_offset = a * sqrt(3) / 2   # vertical distance between hex rows
        y = y_start
        for j in range(self.row_count):
            row = []
            if j % 2 != 0:
                offset = x_offset
            else:
                offset = 0
            x = x_start + offset
            for i in range(self.column_count):
                node = Node(pos=(x, y), grid_id=(j, i))
                row.append(node)
                self.add_widget(node)
                x += a
            y += y_offset
            self.grid.append(row)

    def clean(self):
        """Reset canvas state and rebuild an empty grid.

        TODO remove vhelixes and other stuff !!!
        """
        self.last_node = None
        self.grid = []
        self.vvhelix_id = 0
        self.scaffold_path = []
        self.__construct()
|
unknown
|
codeparrot/codeparrot-clean
| ||
# -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2010 Savoir-faire Linux (<http://www.savoirfairelinux.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp import models, fields
class MgmtsystemHazardControlMeasure(models.Model):
    """A control measure recorded against a hazard."""

    _name = "mgmtsystem.hazard.control_measure"
    _description = "Control Measure of hazard"

    # Label of the measure; translatable so it can be localized.
    name = fields.Char(
        string='Control Measure',
        required=True,
        translate=True,
    )
    # User accountable for applying the measure.
    responsible_user_id = fields.Many2one(
        comodel_name='res.users',
        string='Responsible',
        required=True,
    )
    comments = fields.Text(string='Comments')
    # Parent hazard; measures are removed together with their hazard.
    hazard_id = fields.Many2one(
        comodel_name='mgmtsystem.hazard',
        string='Hazard',
        ondelete='cascade',
        required=False,
        index=True,
    )
|
unknown
|
codeparrot/codeparrot-clean
| ||
# -*- coding: utf-8 -*-
#
# Copyright © 2012 Red Hat, Inc.
#
# This software is licensed to you under the GNU General Public
# License as published by the Free Software Foundation; either version
# 2 of the License (GPLv2) or (at your option) any later version.
# There is NO WARRANTY for this software, express or implied,
# including the implied warranties of MERCHANTABILITY,
# NON-INFRINGEMENT, or FITNESS FOR A PARTICULAR PURPOSE. You should
# have received a copy of GPLv2 along with this software; if not, see
# http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
import datetime
import hashlib
import json
from pulp.server.db.model.base import Model
from pulp.server.db.model.reaper_base import ReaperMixin
from pulp.common import dateutils
# -- classes -----------------------------------------------------------------
class Consumer(Model):
    """A consumer of the content hosted on the Pulp server.

    Stored fields: the unique consumer id, a user-friendly display name and
    description, free-form ``notes``, the ``capabilities`` (operations
    permitted on the consumer) and the consumer's RSA public key
    (``rsa_pub``) used for message authentication.
    """
    RESOURCE_TEMPLATE = 'pulp:consumer:%s'
    collection_name = 'consumers'
    unique_indices = ('id',)
    search_indices = ('notes',)

    def __init__(self, consumer_id, display_name, description=None,
                 notes=None, capabilities=None, rsa_pub=None):
        """
        :param consumer_id: uniquely identifies the consumer
        :param display_name: user-friendly name of the consumer
        :param description: user-friendly description of the consumer
        :param notes: arbitrary key-value pairs describing the consumer
        :param capabilities: operations permitted on the consumer
        :param rsa_pub: the consumer's RSA public key used for authentication
        """
        super(Consumer, self).__init__()
        self.id = consumer_id
        self.display_name = display_name
        self.description = description
        # Fall back to fresh dicts so instances never share mutable defaults.
        self.notes = notes or {}
        self.capabilities = capabilities or {}
        self.rsa_pub = rsa_pub

    @classmethod
    def build_resource_tag(cls, consumer_id):
        """Return a globally unique identifier for *consumer_id*, usable in
        cross-type comparisons.
        """
        return cls.RESOURCE_TEMPLATE % consumer_id
class Bind(Model):
    """A consumer's binding to a repo/distributor pair.

    Entries in ``consumer_actions`` have the shape
    ``{id:<str>, action:<str>, status:<str>}`` where action is one of
    :class:`Bind.Action` and status one of :class:`Bind.Status`.
    """
    collection_name = 'consumer_bindings'
    unique_indices = (
        ('repo_id', 'distributor_id', 'consumer_id'),
    )
    search_indices = (
        ('consumer_id',),
    )

    class Action:
        # enumerated bind/unbind actions
        BIND = 'bind'
        UNBIND = 'unbind'

    class Status:
        # enumerated statuses for a recorded action
        PENDING = 'pending'
        SUCCEEDED = 'succeeded'
        FAILED = 'failed'

    def __init__(self, consumer_id, repo_id, distributor_id, notify_agent, binding_config):
        """
        :param consumer_id: uniquely identifies the consumer
        :param repo_id: uniquely identifies the repository
        :param distributor_id: uniquely identifies a distributor
        :param notify_agent: whether the consumer agent is sent a message
            about the binding
        :param binding_config: configuration passed to the distributor during
            payload creation for this binding
        """
        super(Bind, self).__init__()
        # Required, unique triple identifying the binding.
        self.consumer_id = consumer_id
        self.repo_id = repo_id
        self.distributor_id = distributor_id
        # Configuration.
        self.notify_agent = notify_agent
        self.binding_config = binding_config
        # Mutable state: action history and soft-delete flag.
        self.consumer_actions = []
        self.deleted = False
class RepoProfileApplicability(Model):
    """Pre-calculated applicability for one (profile_hash, repo_id) pair.

    ``applicability`` maps content type ids to lists of applicable unit ids.
    The full ``profile`` is stored alongside so applicability can be
    recalculated when the repository's contents change.  The
    RepoProfileApplicabilityManager can be accessed through the class-level
    "objects" attribute.
    """
    collection_name = 'repo_profile_applicability'
    unique_indices = (
        ('profile_hash', 'repo_id'),
    )

    def __init__(self, profile_hash, repo_id, profile, applicability, _id=None, **kwargs):
        """
        :param profile_hash: hash of the profile this applicability data is for
        :param repo_id: repository id this applicability data is for
        :param profile: the entire profile that resulted in profile_hash
        :param applicability: dict mapping content_type_ids to lists of
            applicable unit ids
        :param _id: MongoDB ObjectId when the document already exists
        :param kwargs: unused; collected so instances can be built directly
            from Mongo query results
        """
        super(RepoProfileApplicability, self).__init__()
        self.profile_hash = profile_hash
        self.repo_id = repo_id
        self.profile = profile
        self.applicability = applicability
        self._id = _id
        # The base Model adds an unnecessary (and confusingly named) `id`
        # attribute; remove it from this model.
        del self.id

    def delete(self):
        """Remove this object's document from the database."""
        self.get_collection().remove({'_id': self._id})

    def save(self):
        """Persist changes: update in place when `_id` is set, else insert."""
        document = {
            'profile_hash': self.profile_hash,
            'repo_id': self.repo_id,
            'profile': self.profile,
            'applicability': self.applicability,
        }
        if self._id is None:
            # New object: insert and remember the generated document id.
            self._id = self.get_collection().insert(document)
        else:
            self.get_collection().update({'_id': self._id}, document)
class UnitProfile(Model):
    """A consumer's unit profile: the content installed on one consumer for
    one content type.

    The profile hash necessarily treats list ordering as significant (and
    MongoDB cannot store Python sets), so plugin Profilers that want two
    profiles with differently-ordered lists to compare equal must sort
    those lists in a repeatable way before storing them — e.g. the RPM
    Profiler sorts its list of installed-RPM dicts so identical package
    sets always serialize identically.
    """
    collection_name = 'consumer_unit_profiles'
    unique_indices = (
        ('consumer_id', 'content_type'),
    )

    def __init__(self, consumer_id, content_type, profile, profile_hash=None):
        """
        :param consumer_id: a consumer id
        :param content_type: the profile (unit) type id
        :param profile: the stored profile
        :param profile_hash: hash of the profile, used for quick comparisons;
            computed automatically from *profile* when None
        """
        super(UnitProfile, self).__init__()
        self.consumer_id = consumer_id
        self.content_type = content_type
        self.profile = profile
        # Compute the hash only when the caller did not supply one.
        if profile_hash is None:
            self.profile_hash = self.calculate_hash(profile)
        else:
            self.profile_hash = profile_hash

    @staticmethod
    def calculate_hash(profile):
        """Return a repeatable hex digest of *profile* for fast comparisons.

        :param profile: the profile structure to hash
        :return: SHA-256 hex digest of the canonical JSON serialization
        """
        # Compact separators and sorted keys make the serialization canonical.
        canonical = json.dumps(profile, separators=(',', ':'), sort_keys=True)
        return hashlib.sha256(canonical).hexdigest()
class ConsumerHistoryEvent(Model, ReaperMixin):
    """
    A single entry in a consumer's event history.

    The documents in this collection may be reaped, so it inherits from ReaperMixin.

    :ivar consumer_id: identifies the consumer
    :type consumer_id: str
    :ivar originator: consumer or username of the admin who initiated the event
    :type originator: str
    :param type: event type; currently supported event types are:
        'consumer_registered', 'consumer_unregistered', 'repo_bound',
        'repo_unbound', 'content_unit_installed', 'content_unit_uninstalled',
        'unit_profile_changed', 'added_to_group', 'removed_from_group'
    :type type: str
    :param details: event details
    :type details: dict
    """
    collection_name = 'consumer_history'
    search_indices = ('consumer_id', 'originator', 'type', )
    def __init__(self, consumer_id, originator, event_type, details):
        """Record the event and stamp it with the current UTC time."""
        super(ConsumerHistoryEvent, self).__init__()
        self.consumer_id = consumer_id
        self.originator = originator
        self.type = event_type
        self.details = details
        # Timestamp is stored as an ISO-8601 string in UTC.
        self.timestamp = dateutils.format_iso8601_datetime(
            datetime.datetime.now(dateutils.utc_tz()))
class ConsumerGroup(Model):
    """
    A named collection of consumers.
    """
    collection_name = 'consumer_groups'
    search_indices = ('display_name', 'consumer_ids')
    def __init__(self, consumer_group_id, display_name=None, description=None,
                 consumer_ids=None, notes=None):
        """Create the group; member list and notes default to empty containers."""
        super(ConsumerGroup, self).__init__()
        self.id = consumer_group_id
        self.display_name = display_name
        self.description = description
        self.consumer_ids = consumer_ids or []
        self.notes = notes or {}
        # No scratchpad content initially.
        self.scratchpad = None
|
unknown
|
codeparrot/codeparrot-clean
| ||
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
"""Contains the core layers: Dense, Dropout.
Also contains their functional aliases.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.keras import layers as keras_layers
from tensorflow.python.layers import base
from tensorflow.python.ops import init_ops
from tensorflow.python.util import deprecation
from tensorflow.python.util.tf_export import tf_export
@tf_export(v1=['layers.Dense'])
class Dense(keras_layers.Dense, base.Layer):
  """Densely-connected layer class.

  Implements `outputs = activation(inputs * kernel + bias)`, where
  `activation` is the element-wise activation function passed as the
  `activation` argument (skipped when `None`), `kernel` is a weights matrix
  created by the layer, and `bias` is a bias vector created by the layer
  (present only when `use_bias` is `True`).

  Arguments:
    units: Integer or Long, dimensionality of the output space.
    activation: Activation function (callable), or `None` to keep a linear
      activation.
    use_bias: Boolean, whether the layer uses a bias.
    kernel_initializer: Initializer function for the weight matrix. When
      `None` (default), weights are initialized with the default initializer
      used by `tf.get_variable`.
    bias_initializer: Initializer function for the bias.
    kernel_regularizer: Regularizer function for the weight matrix.
    bias_regularizer: Regularizer function for the bias.
    activity_regularizer: Regularizer function for the output.
    kernel_constraint: Optional projection function applied to the kernel
      after each `Optimizer` update (e.g. to implement norm or value
      constraints for layer weights). It must take the unprojected variable
      and return a projected variable of the same shape. Constraints are not
      safe to use when doing asynchronous distributed training.
    bias_constraint: Optional projection function applied to the bias after
      each `Optimizer` update.
    trainable: Boolean; when `True`, variables are also added to the graph
      collection `GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`).
    name: String, the name of the layer. Layers with the same name share
      weights, but to avoid mistakes reuse=True is required in such cases.
    _reuse: Boolean, whether to reuse the weights of a previous layer by the
      same name.

  Properties:
    units: Python integer, dimensionality of the output space.
    activation: Activation function (callable).
    use_bias: Boolean, whether the layer uses a bias.
    kernel_initializer: Initializer instance (or name) for the kernel matrix.
    bias_initializer: Initializer instance (or name) for the bias.
    kernel_regularizer: Regularizer instance for the kernel matrix (callable).
    bias_regularizer: Regularizer instance for the bias (callable).
    activity_regularizer: Regularizer instance for the output (callable).
    kernel_constraint: Constraint function for the kernel matrix.
    bias_constraint: Constraint function for the bias.
    kernel: Weight matrix (TensorFlow variable or tensor).
    bias: Bias vector, if applicable (TensorFlow variable or tensor).
  """

  def __init__(self, units,
               activation=None,
               use_bias=True,
               kernel_initializer=None,
               bias_initializer=init_ops.zeros_initializer(),
               kernel_regularizer=None,
               bias_regularizer=None,
               activity_regularizer=None,
               kernel_constraint=None,
               bias_constraint=None,
               trainable=True,
               name=None,
               **kwargs):
    # Everything is forwarded verbatim to the Keras implementation; this
    # subclass only grafts on the tf.layers base.Layer behavior.
    super(Dense, self).__init__(
        units=units,
        activation=activation,
        use_bias=use_bias,
        kernel_initializer=kernel_initializer,
        bias_initializer=bias_initializer,
        kernel_regularizer=kernel_regularizer,
        bias_regularizer=bias_regularizer,
        activity_regularizer=activity_regularizer,
        kernel_constraint=kernel_constraint,
        bias_constraint=bias_constraint,
        trainable=trainable,
        name=name,
        **kwargs)
@deprecation.deprecated(
    date=None,
    instructions='Use keras.layers.dense instead.')
@tf_export(v1=['layers.dense'])
def dense(
    inputs, units,
    activation=None,
    use_bias=True,
    kernel_initializer=None,
    bias_initializer=init_ops.zeros_initializer(),
    kernel_regularizer=None,
    bias_regularizer=None,
    activity_regularizer=None,
    kernel_constraint=None,
    bias_constraint=None,
    trainable=True,
    name=None,
    reuse=None):
  """Functional interface for the densely-connected layer.

  Applies `outputs = activation(inputs * kernel + bias)`, where `activation`
  is the activation function passed as the `activation` argument (skipped
  when `None`), `kernel` is a weights matrix created by the layer, and
  `bias` is a bias vector created by the layer (only when `use_bias` is
  `True`).

  Arguments:
    inputs: Tensor input.
    units: Integer or Long, dimensionality of the output space.
    activation: Activation function (callable), or `None` to keep a linear
      activation.
    use_bias: Boolean, whether the layer uses a bias.
    kernel_initializer: Initializer function for the weight matrix. When
      `None` (default), weights are initialized with the default initializer
      used by `tf.get_variable`.
    bias_initializer: Initializer function for the bias.
    kernel_regularizer: Regularizer function for the weight matrix.
    bias_regularizer: Regularizer function for the bias.
    activity_regularizer: Regularizer function for the output.
    kernel_constraint: Optional projection function applied to the kernel
      after each `Optimizer` update (e.g. to implement norm or value
      constraints). It must take the unprojected variable and return a
      projected variable of the same shape. Constraints are not safe to use
      when doing asynchronous distributed training.
    bias_constraint: Optional projection function applied to the bias after
      each `Optimizer` update.
    trainable: Boolean; when `True`, variables are also added to the graph
      collection `GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`).
    name: String, the name of the layer.
    reuse: Boolean, whether to reuse the weights of a previous layer by the
      same name.

  Returns:
    Output tensor the same shape as `inputs` except the last dimension is of
    size `units`.

  Raises:
    ValueError: if eager execution is enabled.
  """
  # Build the layer object and immediately apply it to the input tensor.
  return Dense(units,
               activation=activation,
               use_bias=use_bias,
               kernel_initializer=kernel_initializer,
               bias_initializer=bias_initializer,
               kernel_regularizer=kernel_regularizer,
               bias_regularizer=bias_regularizer,
               activity_regularizer=activity_regularizer,
               kernel_constraint=kernel_constraint,
               bias_constraint=bias_constraint,
               trainable=trainable,
               name=name,
               _scope=name,
               _reuse=reuse).apply(inputs)
@tf_export(v1=['layers.Dropout'])
class Dropout(keras_layers.Dropout, base.Layer):
  """Applies Dropout to the input.

  During training, a fraction `rate` of input units is randomly set to 0 at
  each update, which helps prevent overfitting. The kept units are scaled by
  `1 / (1 - rate)` so that their sum is unchanged at training and inference
  time.

  Arguments:
    rate: The dropout rate, between 0 and 1. E.g. `rate=0.1` would drop out
      10% of input units.
    noise_shape: 1D tensor of type `int32` representing the shape of the
      binary dropout mask that will be multiplied with the input. For
      instance, if your inputs have shape `(batch_size, timesteps, features)`
      and you want the dropout mask to be the same for all timesteps, you can
      use `noise_shape=[batch_size, 1, features]`.
    seed: A Python integer. Used to create random seeds. See
      `tf.set_random_seed` for behavior.
    name: The name of the layer (string).
  """

  def __init__(self, rate=0.5,
               noise_shape=None,
               seed=None,
               name=None,
               **kwargs):
    # All arguments are forwarded verbatim to the Keras implementation.
    super(Dropout, self).__init__(rate=rate, noise_shape=noise_shape,
                                  seed=seed, name=name, **kwargs)

  def call(self, inputs, training=False):
    # Delegate to Keras, passing the training flag through unchanged.
    return super(Dropout, self).call(inputs, training=training)
@deprecation.deprecated(
    date=None,
    instructions='Use keras.layers.dropout instead.')
@tf_export(v1=['layers.dropout'])
def dropout(inputs,
            rate=0.5,
            noise_shape=None,
            seed=None,
            training=False,
            name=None):
  """Applies Dropout to the input.

  During training, a fraction `rate` of input units is randomly set to 0 at
  each update, which helps prevent overfitting. The kept units are scaled by
  `1 / (1 - rate)` so that their sum is unchanged at training and inference
  time.

  Arguments:
    inputs: Tensor input.
    rate: The dropout rate, between 0 and 1. E.g. "rate=0.1" would drop out
      10% of input units.
    noise_shape: 1D tensor of type `int32` representing the shape of the
      binary dropout mask that will be multiplied with the input. For
      instance, if your inputs have shape `(batch_size, timesteps, features)`
      and you want the dropout mask to be the same for all timesteps, you can
      use `noise_shape=[batch_size, 1, features]`.
    seed: A Python integer. Used to create random seeds. See
      `tf.set_random_seed` for behavior.
    training: Either a Python boolean, or a TensorFlow boolean scalar tensor
      (e.g. a placeholder). Whether to return the output in training mode
      (apply dropout) or in inference mode (return the input untouched).
    name: The name of the layer (string).

  Returns:
    Output tensor.

  Raises:
    ValueError: if eager execution is enabled.
  """
  # Build the layer object and immediately apply it to the input tensor.
  return Dropout(rate, noise_shape=noise_shape,
                 seed=seed, name=name).apply(inputs, training=training)
@tf_export(v1=['layers.Flatten'])
class Flatten(keras_layers.Flatten, base.Layer):
  """Flattens an input tensor while preserving the batch axis (axis 0).

  Arguments:
    data_format: A string, one of `channels_last` (default) or
      `channels_first`, giving the ordering of the dimensions in the inputs.
      `channels_last` corresponds to inputs with shape
      `(batch, ..., channels)`; `channels_first` corresponds to inputs with
      shape `(batch, channels, ...)`.

  Examples:

  ```
  x = tf.placeholder(shape=(None, 4, 4), dtype='float32')
  y = Flatten()(x)
  # now `y` has shape `(None, 16)`

  x = tf.placeholder(shape=(None, 3, None), dtype='float32')
  y = Flatten()(x)
  # now `y` has shape `(None, None)`
  ```
  """
  # The Keras implementation already does everything needed.
  pass
@deprecation.deprecated(
    date=None,
    instructions='Use keras.layers.flatten instead.')
@tf_export(v1=['layers.flatten'])
def flatten(inputs, name=None, data_format='channels_last'):
  """Flattens an input tensor while preserving the batch axis (axis 0).

  Arguments:
    inputs: Tensor input.
    name: The name of the layer (string).
    data_format: A string, one of `channels_last` (default) or
      `channels_first`, giving the ordering of the dimensions in the inputs.
      `channels_last` corresponds to inputs with shape
      `(batch, height, width, channels)`; `channels_first` corresponds to
      inputs with shape `(batch, channels, height, width)`.

  Returns:
    Reshaped tensor.

  Examples:

  ```
  x = tf.placeholder(shape=(None, 4, 4), dtype='float32')
  y = flatten(x)
  # now `y` has shape `(None, 16)`

  x = tf.placeholder(shape=(None, 3, None), dtype='float32')
  y = flatten(x)
  # now `y` has shape `(None, None)`
  ```
  """
  # Build the layer object and immediately apply it to the input tensor.
  return Flatten(name=name, data_format=data_format).apply(inputs)
# Aliases: expose the same layer under the alternative "fully connected"
# naming for callers that use that terminology.
FullyConnected = Dense
fully_connected = dense
|
unknown
|
codeparrot/codeparrot-clean
| ||
#!/usr/bin/env python3
# Copyright (c) 2014-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Test node handling
#
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
import urllib.parse
class NodeHandlingTest(BitcoinTestFramework):
    """Test node handling RPCs.

    Covers setban/listbanned/clearbanned, persistence of the ban list
    across a node restart (including expiry of timed bans), and the
    disconnectnode RPC.
    """

    def __init__(self):
        super().__init__()
        self.num_nodes = 4
        self.setup_clean_chain = False

    def run_test(self):
        ###########################
        # setban/listbanned tests #
        ###########################
        assert_equal(len(self.nodes[2].getpeerinfo()), 4)  # we should have 4 nodes at this point
        self.nodes[2].setban("127.0.0.1", "add")
        time.sleep(3)  # wait till the nodes are disconnected
        assert_equal(len(self.nodes[2].getpeerinfo()), 0)  # all nodes must be disconnected at this point
        assert_equal(len(self.nodes[2].listbanned()), 1)
        self.nodes[2].clearbanned()
        assert_equal(len(self.nodes[2].listbanned()), 0)
        self.nodes[2].setban("127.0.0.0/24", "add")
        assert_equal(len(self.nodes[2].listbanned()), 1)
        try:
            # Throws an RPC exception because 127.0.0.1 is within range
            # 127.0.0.0/24. Narrowed from a bare `except:` so that
            # KeyboardInterrupt/SystemExit are not swallowed.
            self.nodes[2].setban("127.0.0.1", "add")
        except Exception:
            pass
        # still only one banned ip because 127.0.0.1 is within the range of 127.0.0.0/24
        assert_equal(len(self.nodes[2].listbanned()), 1)
        try:
            self.nodes[2].setban("127.0.0.1", "remove")
        except Exception:
            pass
        assert_equal(len(self.nodes[2].listbanned()), 1)
        self.nodes[2].setban("127.0.0.0/24", "remove")
        assert_equal(len(self.nodes[2].listbanned()), 0)
        self.nodes[2].clearbanned()
        assert_equal(len(self.nodes[2].listbanned()), 0)

        # test persisted banlist
        self.nodes[2].setban("127.0.0.0/32", "add")
        self.nodes[2].setban("127.0.0.0/24", "add")
        self.nodes[2].setban("192.168.0.1", "add", 1)  # ban for 1 second
        self.nodes[2].setban("2001:4d48:ac57:400:cacf:e9ff:fe1d:9c63/19", "add", 1000)  # ban for 1000 seconds
        listBeforeShutdown = self.nodes[2].listbanned()
        assert_equal("192.168.0.1/32", listBeforeShutdown[2]['address'])  # must be here
        time.sleep(2)  # make 100% sure we expired 192.168.0.1 node time
        # Restart the node: expired bans must be dropped, the rest reloaded
        # from disk in sorted order.
        stop_node(self.nodes[2], 2)
        self.nodes[2] = start_node(2, self.options.tmpdir)
        listAfterShutdown = self.nodes[2].listbanned()
        assert_equal("127.0.0.0/24", listAfterShutdown[0]['address'])
        assert_equal("127.0.0.0/32", listAfterShutdown[1]['address'])
        assert_equal("/19" in listAfterShutdown[2]['address'], True)

        ###########################
        # RPC disconnectnode test #
        ###########################
        url = urllib.parse.urlparse(self.nodes[1].url)
        self.nodes[0].disconnectnode(url.hostname + ":" + str(p2p_port(1)))
        time.sleep(2)  # disconnecting a node needs a little bit of time
        for node in self.nodes[0].getpeerinfo():
            assert(node['addr'] != url.hostname + ":" + str(p2p_port(1)))
        connect_nodes_bi(self.nodes, 0, 1)  # reconnect the node
        found = any(node['addr'] == url.hostname + ":" + str(p2p_port(1))
                    for node in self.nodes[0].getpeerinfo())
        assert(found)
# Run the functional test when invoked as a script.
if __name__ == '__main__':
    NodeHandlingTest().main()
|
unknown
|
codeparrot/codeparrot-clean
| ||
{
"kind": "Dashboard",
"apiVersion": "dashboard.grafana.app/v2alpha1",
"metadata": {
"name": "v32.no_op_migration.v42"
},
"spec": {
"annotations": [
{
"kind": "AnnotationQuery",
"spec": {
"datasource": {
"type": "grafana",
"uid": "-- Grafana --"
},
"query": {
"kind": "grafana",
"spec": {}
},
"enable": true,
"hide": true,
"iconColor": "rgba(0, 211, 255, 1)",
"name": "Annotations \u0026 Alerts",
"builtIn": true,
"legacyOptions": {
"type": "dashboard"
}
}
},
{
"kind": "AnnotationQuery",
"spec": {
"query": {
"kind": "",
"spec": {}
},
"enable": true,
"hide": false,
"iconColor": "",
"name": "Deployments"
}
}
],
"cursorSync": "Off",
"editable": true,
"elements": {
"panel-1": {
"kind": "Panel",
"spec": {
"id": 1,
"title": "Panel with transformations remains unchanged",
"description": "",
"links": [],
"data": {
"kind": "QueryGroup",
"spec": {
"queries": [
{
"kind": "PanelQuery",
"spec": {
"query": {
"kind": "prometheus",
"spec": {}
},
"datasource": {
"type": "prometheus",
"uid": "default-ds-uid"
},
"refId": "A",
"hidden": false
}
}
],
"transformations": [
{
"kind": "labelsToFields",
"spec": {
"id": "labelsToFields",
"options": {
"keepLabels": [
"job",
"instance"
],
"mode": "rows"
}
}
},
{
"kind": "merge",
"spec": {
"id": "merge",
"options": {}
}
}
],
"queryOptions": {}
}
},
"vizConfig": {
"kind": "timeseries",
"spec": {
"pluginVersion": "",
"options": {},
"fieldConfig": {
"defaults": {},
"overrides": []
}
}
}
}
},
"panel-2": {
"kind": "Panel",
"spec": {
"id": 2,
"title": "Graph panel remains unchanged",
"description": "",
"links": [],
"data": {
"kind": "QueryGroup",
"spec": {
"queries": [
{
"kind": "PanelQuery",
"spec": {
"query": {
"kind": "prometheus",
"spec": {}
},
"datasource": {
"type": "prometheus",
"uid": "default-ds-uid"
},
"refId": "A",
"hidden": false
}
}
],
"transformations": [],
"queryOptions": {}
}
},
"vizConfig": {
"kind": "timeseries",
"spec": {
"pluginVersion": "",
"options": {
"__angularMigration": {
"autoMigrateFrom": "graph",
"originalOptions": {
"yAxes": [
{
"show": true
}
]
}
}
},
"fieldConfig": {
"defaults": {},
"overrides": []
}
}
}
}
},
"panel-4": {
"kind": "Panel",
"spec": {
"id": 4,
"title": "Nested stat panel",
"description": "",
"links": [],
"data": {
"kind": "QueryGroup",
"spec": {
"queries": [],
"transformations": [],
"queryOptions": {}
}
},
"vizConfig": {
"kind": "stat",
"spec": {
"pluginVersion": "",
"options": {},
"fieldConfig": {
"defaults": {
"unit": "bytes"
},
"overrides": []
}
}
}
}
}
},
"layout": {
"kind": "RowsLayout",
"spec": {
"rows": [
{
"kind": "RowsLayoutRow",
"spec": {
"title": "",
"collapse": false,
"hideHeader": true,
"layout": {
"kind": "GridLayout",
"spec": {
"items": [
{
"kind": "GridLayoutItem",
"spec": {
"x": 0,
"y": 0,
"width": 6,
"height": 3,
"element": {
"kind": "ElementReference",
"name": "panel-1"
}
}
},
{
"kind": "GridLayoutItem",
"spec": {
"x": 0,
"y": 0,
"width": 6,
"height": 3,
"element": {
"kind": "ElementReference",
"name": "panel-2"
}
}
}
]
}
}
}
},
{
"kind": "RowsLayoutRow",
"spec": {
"title": "Row with nested panels",
"collapse": false,
"layout": {
"kind": "GridLayout",
"spec": {
"items": [
{
"kind": "GridLayoutItem",
"spec": {
"x": 0,
"y": 0,
"width": 6,
"height": 3,
"element": {
"kind": "ElementReference",
"name": "panel-4"
}
}
}
]
}
}
}
}
]
}
},
"links": [],
"liveNow": false,
"preload": false,
"tags": [],
"timeSettings": {
"timezone": "",
"from": "now-6h",
"to": "now",
"autoRefresh": "",
"autoRefreshIntervals": [
"5s",
"10s",
"30s",
"1m",
"5m",
"15m",
"30m",
"1h",
"2h",
"1d"
],
"hideTimepicker": false,
"fiscalYearStartMonth": 0
},
"title": "V32 No-Op Migration Test Dashboard",
"variables": [
{
"kind": "QueryVariable",
"spec": {
"name": "environment",
"current": {
"text": "",
"value": ""
},
"hide": "dontHide",
"refresh": "never",
"skipUrlSync": false,
"query": {
"kind": "prometheus",
"spec": {}
},
"regex": "",
"sort": "disabled",
"options": [],
"multi": false,
"includeAll": false,
"allowCustomValue": true
}
}
]
},
"status": {
"conversion": {
"failed": false,
"storedVersion": "v1beta1"
}
}
}
|
json
|
github
|
https://github.com/grafana/grafana
|
apps/dashboard/pkg/migration/conversion/testdata/migrated_dashboards_output/v1beta1-mig-v32.no_op_migration.v42.v2alpha1.json
|
/*
* Copyright (c) 2020, Michael Grunder <michael dot grunder at gmail dot com>
*
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* * Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of Redis nor the names of its contributors may be used
* to endorse or promote products derived from this software without
* specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
/*
* SDS compatibility header.
*
* This simple file maps sds types and calls to their unique hiredis symbol names.
* It's useful when we build Hiredis as a dependency of Redis and want to call
* Hiredis' sds symbols rather than the ones built into Redis, as the libraries
* have slightly diverged and could cause hard to track down ABI incompatibility
* bugs.
*
*/
#ifndef HIREDIS_SDS_COMPAT
#define HIREDIS_SDS_COMPAT
/* The sds type itself and its header/length accessors. */
#define sds hisds
#define sdslen hi_sdslen
#define sdsavail hi_sdsavail
#define sdssetlen hi_sdssetlen
#define sdsinclen hi_sdsinclen
#define sdsalloc hi_sdsalloc
#define sdssetalloc hi_sdssetalloc
#define sdsAllocPtr hi_sdsAllocPtr
#define sdsAllocSize hi_sdsAllocSize
/* The rest of the sds API, remapped in alphabetical order. */
#define sdscat hi_sdscat
#define sdscatfmt hi_sdscatfmt
#define sdscatlen hi_sdscatlen
#define sdscatprintf hi_sdscatprintf
#define sdscatrepr hi_sdscatrepr
#define sdscatsds hi_sdscatsds
#define sdscatvprintf hi_sdscatvprintf
#define sdsclear hi_sdsclear
#define sdscmp hi_sdscmp
#define sdscpy hi_sdscpy
#define sdscpylen hi_sdscpylen
#define sdsdup hi_sdsdup
#define sdsempty hi_sdsempty
#define sds_free hi_sds_free
#define sdsfree hi_sdsfree
#define sdsfreesplitres hi_sdsfreesplitres
#define sdsfromlonglong hi_sdsfromlonglong
#define sdsgrowzero hi_sdsgrowzero
#define sdsIncrLen hi_sdsIncrLen
#define sdsjoin hi_sdsjoin
#define sdsjoinsds hi_sdsjoinsds
#define sdsll2str hi_sdsll2str
#define sdsMakeRoomFor hi_sdsMakeRoomFor
#define sds_malloc hi_sds_malloc
#define sdsmapchars hi_sdsmapchars
#define sdsnew hi_sdsnew
#define sdsnewlen hi_sdsnewlen
#define sdsrange hi_sdsrange
#define sds_realloc hi_sds_realloc
#define sdsRemoveFreeSpace hi_sdsRemoveFreeSpace
#define sdssplitargs hi_sdssplitargs
#define sdssplitlen hi_sdssplitlen
#define sdstolower hi_sdstolower
#define sdstoupper hi_sdstoupper
#define sdstrim hi_sdstrim
#define sdsull2str hi_sdsull2str
#define sdsupdatelen hi_sdsupdatelen
#endif /* HIREDIS_SDS_COMPAT */
|
c
|
github
|
https://github.com/redis/redis
|
deps/hiredis/sdscompat.h
|
#ifndef SRC_JSON_UTILS_H_
#define SRC_JSON_UTILS_H_
#if defined(NODE_WANT_INTERNALS) && NODE_WANT_INTERNALS
#include <iomanip>
#include <limits>
#include <ostream>
#include <string>
#include <string_view>
namespace node {
// A string needs JSON escaping if it contains a double quote, a backslash,
// or any character below 0x20 (control characters).
constexpr bool NeedsJsonEscape(std::string_view str) {
  for (std::size_t i = 0; i < str.size(); ++i) {
    const char ch = str[i];
    if (ch == '"' || ch == '\\' || ch < 0x20) return true;
  }
  return false;
}
std::string EscapeJsonChars(std::string_view str);
std::string Reindent(const std::string& str, int indentation);
// JSON compiler definitions.
// Streaming JSON serializer that writes directly to an std::ostream.
// In compact mode all optional whitespace (newlines, indentation, the space
// after ':') is suppressed; otherwise output is pretty-printed with
// two-space indentation.
class JSONWriter {
 public:
  JSONWriter(std::ostream& out, bool compact)
    : out_(out), compact_(compact) {}

 private:
  // Whitespace helpers; the write_* ones become no-ops in compact mode.
  inline void indent() { indent_ += 2; }
  inline void deindent() { indent_ -= 2; }
  // Emit the current indentation at the start of a line.
  inline void advance() {
    if (compact_) return;
    for (int i = 0; i < indent_; i++) out_ << ' ';
  }
  inline void write_one_space() {
    if (compact_) return;
    out_ << ' ';
  }
  inline void write_new_line() {
    if (compact_) return;
    out_ << '\n';
  }

 public:
  // Open an anonymous object (top-level document or array element).
  // A ',' is emitted first when a value was just completed in this scope.
  inline void json_start() {
    if (state_ == kAfterValue) out_ << ',';
    write_new_line();
    advance();
    out_ << '{';
    indent();
    state_ = kObjectStart;
  }
  // Close an object opened with json_start().
  inline void json_end() {
    write_new_line();
    deindent();
    advance();
    out_ << '}';
    state_ = kAfterValue;
  }
  // Open a `"key": {` member.
  template <typename T>
  inline void json_objectstart(T key) {
    if (state_ == kAfterValue) out_ << ',';
    write_new_line();
    advance();
    write_string(key);
    out_ << ':';
    write_one_space();
    out_ << '{';
    indent();
    state_ = kObjectStart;
  }
  // Open a `"key": [` member.
  template <typename T>
  inline void json_arraystart(T key) {
    if (state_ == kAfterValue) out_ << ',';
    write_new_line();
    advance();
    write_string(key);
    out_ << ':';
    write_one_space();
    out_ << '[';
    indent();
    state_ = kObjectStart;
  }
  // Close a keyed object; also terminates the line once the outermost
  // object is complete (indent_ back at 0).
  inline void json_objectend() {
    write_new_line();
    deindent();
    advance();
    out_ << '}';
    if (indent_ == 0) {
      // Top-level object is complete, so end the line.
      out_ << '\n';
    }
    state_ = kAfterValue;
  }
  // Close an array opened with json_arraystart().
  inline void json_arrayend() {
    write_new_line();
    deindent();
    advance();
    out_ << ']';
    state_ = kAfterValue;
  }
  // Write a `"key": value` member.
  template <typename T, typename U>
  inline void json_keyvalue(const T& key, const U& value) {
    if (state_ == kAfterValue) out_ << ',';
    write_new_line();
    advance();
    write_string(key);
    out_ << ':';
    write_one_space();
    write_value(value);
    state_ = kAfterValue;
  }
  // Write a bare array element.
  template <typename U>
  inline void json_element(const U& value) {
    if (state_ == kAfterValue) out_ << ',';
    write_new_line();
    advance();
    write_value(value);
    state_ = kAfterValue;
  }
  struct Null {};  // Usable as a JSON value.
  // Pre-serialized JSON that is embedded verbatim (re-indented to fit).
  struct ForeignJSON {
    std::string as_string;
  };

 private:
  // Numeric overload; SFINAE restricts it to types with a numeric_limits
  // specialization. bool is special-cased to print true/false.
  template <typename T,
            typename test_for_number = typename std::
                enable_if<std::numeric_limits<T>::is_specialized, bool>::type>
  inline void write_value(T number) {
    if constexpr (std::is_same<T, bool>::value)
      out_ << (number ? "true" : "false");
    else
      out_ << number;
  }
  inline void write_value(Null null) { out_ << "null"; }
  inline void write_value(std::string_view str) { write_string(str); }
  inline void write_value(const ForeignJSON& json) {
    out_ << Reindent(json.as_string, indent_);
  }
  // Quote a string, escaping only when required.
  inline void write_string(std::string_view str) {
    out_ << '"';
    if (NeedsJsonEscape(str))  // only create temporary std::string if necessary
      out_ << EscapeJsonChars(str);
    else
      out_ << str;
    out_ << '"';
  }
  // kObjectStart: nothing written yet in the current scope (no comma needed);
  // kAfterValue: a value was just completed, so the next item needs a comma.
  enum JSONState { kObjectStart, kAfterValue };
  std::ostream& out_;
  bool compact_;
  int indent_ = 0;
  int state_ = kObjectStart;
};
} // namespace node
#endif // defined(NODE_WANT_INTERNALS) && NODE_WANT_INTERNALS
#endif // SRC_JSON_UTILS_H_
|
c
|
github
|
https://github.com/nodejs/node
|
src/json_utils.h
|
from django.conf import settings
from django.db import models
from meetup.api import MeetupClient
import datetime
STATUSES = [(s, s) for s in ('past','pending','upcoming')]
API_KEY = getattr(settings, 'MEETUP_KEY', None)
class Account(models.Model):
    """A meetup.com account whose events are mirrored into this application."""
    # API key handed to MeetupClient when syncing (see Event.save).
    key = models.CharField(max_length=128)
    description = models.CharField(max_length=128)
    # Used as the account's unicode representation (and, presumably, in URLs).
    slug = models.SlugField()
    container_id = models.CharField(max_length=16, blank=True)
    meetup_url = models.URLField(verify_exists=False, blank=True)
    # NOTE(review): looks like a per-account sync toggle — not enforced in
    # this file; confirm against the sync code.
    sync = models.BooleanField(default=True)
    def __unicode__(self):
        return self.slug
    def past_events(self):
        """Return this account's events whose status is 'past'."""
        return self.events.filter(status='past')
    def upcoming_events(self):
        """Return this account's events that are not past ('pending'/'upcoming')."""
        return self.events.exclude(status='past')
class EventManager(models.Manager):
    """Manager with shortcuts for splitting events by past/upcoming status."""
    def past(self):
        """Return events whose status is 'past'."""
        # Filter through `self` rather than hard-coding Event.objects so the
        # manager works wherever it is attached; results are identical since
        # Manager.filter delegates to the manager's queryset.
        return self.filter(status='past')
    def upcoming(self):
        """Return events that are not yet past ('pending' or 'upcoming')."""
        return self.exclude(status='past')
class Event(models.Model):
    """A meetup.com event mirrored locally for an :class:`Account`."""
    objects = EventManager()
    account = models.ForeignKey(Account, related_name="events")
    # Meetup.com fields
    id = models.CharField(max_length=255, primary_key=True)
    meetup_url = models.URLField(verify_exists=False)
    title = models.CharField(max_length=255, blank=True)
    description = models.TextField(blank=True)
    start_time = models.DateTimeField(blank=True, null=True)
    location = models.CharField(max_length=255, blank=True)
    address = models.CharField(max_length=128, blank=True)
    city = models.CharField(max_length=64, blank=True)
    state = models.CharField(max_length=64, blank=True)
    zipcode = models.CharField(max_length=10, blank=True)
    latitude = models.CharField(max_length=16, blank=True)
    longitude = models.CharField(max_length=16, blank=True)
    url = models.URLField(verify_exists=False, max_length=255, blank=True)
    rsvp_count = models.IntegerField(default=0)
    timestamp = models.DateTimeField()
    status = models.CharField(max_length=16, choices=STATUSES)
    organizer_id = models.CharField(max_length=32, blank=True)
    organizer_name = models.CharField(max_length=128, blank=True)
    # user defined fields
    # none for now, add tags later
    class Meta:
        ordering = ('start_time',)
    def __unicode__(self):
        return self.pk
    def save(self, sync=True, **kwargs):
        """Persist the event.

        ``sync`` is reserved for pushing changes back to meetup.com; the
        push is currently disabled (see the commented block below).
        """
        super(Event, self).save(**kwargs)
        # if sync:
        #     api_client = MeetupClient(self.account.key)
        #     api_client.update_event(self.pk, udf_category=self.category)
    def city_state(self):
        """Return 'City, State', whichever part is available, or ''."""
        if self.city and self.state:
            return "%s, %s" % (self.city, self.state)
        return self.city or self.state or ''
    def short_description(self, length=64):
        """Return the description truncated to ``length`` chars plus '...'."""
        if len(self.description) > length:
            # rstrip() removes any run of trailing whitespace left by the
            # cut (the old code only dropped a single trailing space), so we
            # never render "word ...".
            return self.description[:length].rstrip() + '...'
        return self.description
|
unknown
|
codeparrot/codeparrot-clean
| ||
import numpy
import itertools
import operator
from tm_dataset import PytablesBitextIterator
class HomogenousData(PytablesBitextIterator):
    """Bitext iterator that yields minibatches of similar sentence lengths.

    Pulls up to ``k_batches`` raw batches from the underlying iterator,
    sorts the pooled sentence pairs by max(source length, target length),
    and re-slices them into ``batch_size`` chunks so each yielded minibatch
    contains sentences of comparable length (less padding waste).
    """
    def __init__(self, *args, **kwargs):
        PytablesBitextIterator.__init__(self, *args, **kwargs)
        # Lazily (re)created generator over homogeneous minibatches.
        self.batch_iter = None
    def get_homogenous_batch_iter(self):
        """Generator yielding [source_batch, target_batch] pairs."""
        end_of_iter = False
        while True:
            k_batches = 10
            batch_size = self.batch_size
            x = []
            y = []
            # Pool several raw batches so sorting by length has material.
            for k in xrange(k_batches):
                try:
                    dx, dy = PytablesBitextIterator.next(self)
                except StopIteration:
                    end_of_iter = True
                    break
                # `is None` (not `== None`): identity check; also avoids
                # ambiguous elementwise comparison on array-like batches.
                if dx is None or dy is None:
                    break
                x += dx
                y += dy
            if not x or not y:
                # `return` instead of `raise StopIteration` — identical
                # semantics inside a generator, and safe under PEP 479.
                return
            lens = numpy.asarray([map(len, x), map(len, y)])
            order = numpy.argsort(lens.max(axis=0)) if k_batches > 1 else numpy.arange(len(x))
            for k in range(k_batches):
                if k * batch_size > len(order):
                    break
                indices = order[k * batch_size:(k + 1) * batch_size]
                yield [[x[ii] for ii in indices], [y[ii] for ii in indices]]
            if end_of_iter:
                return
    def next(self, peek=False):
        """Return the next (source_batch, target_batch) pair."""
        if not self.batch_iter:
            self.batch_iter = self.get_homogenous_batch_iter()
        if not self.batch_iter:
            raise StopIteration
        try:
            batch = next(self.batch_iter)
        except StopIteration:
            # Exhausted: drop the generator so callers see StopIteration and
            # a later call starts a fresh pass.
            self.batch_iter = None
            raise StopIteration
        return batch[0], batch[1]
|
unknown
|
codeparrot/codeparrot-clean
| ||
"""
PostgreSQL database backend for Django.
Requires psycopg 2: http://initd.org/projects/psycopg2
"""
import sys
from django.db.backends import *
from django.db.backends.postgresql_psycopg2.operations import DatabaseOperations
from django.db.backends.postgresql_psycopg2.client import DatabaseClient
from django.db.backends.postgresql_psycopg2.creation import DatabaseCreation
from django.db.backends.postgresql_psycopg2.version import get_version
from django.db.backends.postgresql_psycopg2.introspection import DatabaseIntrospection
from django.utils.encoding import force_str
from django.utils.functional import cached_property
from django.utils.safestring import SafeText, SafeBytes
from django.utils.timezone import utc
try:
import psycopg2 as Database
import psycopg2.extensions
except ImportError as e:
from django.core.exceptions import ImproperlyConfigured
raise ImproperlyConfigured("Error loading psycopg2 module: %s" % e)
# Re-export the DB-API exception classes Django expects at module level.
DatabaseError = Database.DatabaseError
IntegrityError = Database.IntegrityError
# Decode text columns to unicode, and teach psycopg2 how to quote Django's
# safe-string types so they can be passed straight to the driver.
psycopg2.extensions.register_type(psycopg2.extensions.UNICODE)
psycopg2.extensions.register_adapter(SafeBytes, psycopg2.extensions.QuotedString)
psycopg2.extensions.register_adapter(SafeText, psycopg2.extensions.QuotedString)
def utc_tzinfo_factory(offset):
    """tzinfo factory for psycopg2 cursors.

    Accepts only a zero offset and returns Django's UTC tzinfo; any other
    offset means the connection time zone is misconfigured.
    """
    if offset == 0:
        return utc
    raise AssertionError("database connection isn't set to UTC")
class DatabaseFeatures(BaseDatabaseFeatures):
    """Feature flags describing what the PostgreSQL backend supports.

    Each attribute overrides the BaseDatabaseFeatures default; Django's
    ORM consults these to decide which SQL constructs it may emit.
    """
    needs_datetime_string_cast = False
    can_return_id_from_insert = True
    requires_rollback_on_dirty_transaction = True
    has_real_datatype = True
    can_defer_constraint_checks = True
    has_select_for_update = True
    has_select_for_update_nowait = True
    has_bulk_insert = True
    uses_savepoints = True
    supports_tablespaces = True
    supports_transactions = True
    can_distinct_on_fields = True
    can_rollback_ddl = True
class DatabaseWrapper(BaseDatabaseWrapper):
    """Django database wrapper for PostgreSQL via psycopg2."""
    vendor = 'postgresql'
    # Lookup-type -> SQL operator fragments. %s is the placeholder for the
    # right-hand side; the i* variants compare against UPPER()-cased values
    # for case-insensitive matching.
    operators = {
        'exact': '= %s',
        'iexact': '= UPPER(%s)',
        'contains': 'LIKE %s',
        'icontains': 'LIKE UPPER(%s)',
        'regex': '~ %s',
        'iregex': '~* %s',
        'gt': '> %s',
        'gte': '>= %s',
        'lt': '< %s',
        'lte': '<= %s',
        'startswith': 'LIKE %s',
        'endswith': 'LIKE %s',
        'istartswith': 'LIKE UPPER(%s)',
        'iendswith': 'LIKE UPPER(%s)',
    }
    # Expose the driver module on the wrapper (Django backend convention).
    Database = Database

    def __init__(self, *args, **kwargs):
        super(DatabaseWrapper, self).__init__(*args, **kwargs)
        opts = self.settings_dict["OPTIONS"]
        # Default isolation: READ COMMITTED, unless OPTIONS overrides it.
        RC = psycopg2.extensions.ISOLATION_LEVEL_READ_COMMITTED
        self.isolation_level = opts.get('isolation_level', RC)
        self.features = DatabaseFeatures(self)
        self.ops = DatabaseOperations(self)
        self.client = DatabaseClient(self)
        self.creation = DatabaseCreation(self)
        self.introspection = DatabaseIntrospection(self)
        self.validation = BaseDatabaseValidation(self)

    def get_connection_params(self):
        """Build the kwargs dict for psycopg2.connect() from settings.

        OPTIONS entries are passed through, except 'autocommit' and
        'isolation_level', which configure the wrapper rather than the
        driver connection.

        :raises ImproperlyConfigured: when NAME is missing.
        """
        settings_dict = self.settings_dict
        if not settings_dict['NAME']:
            from django.core.exceptions import ImproperlyConfigured
            raise ImproperlyConfigured(
                "settings.DATABASES is improperly configured. "
                "Please supply the NAME value.")
        conn_params = {
            'database': settings_dict['NAME'],
        }
        conn_params.update(settings_dict['OPTIONS'])
        if 'autocommit' in conn_params:
            del conn_params['autocommit']
        if 'isolation_level' in conn_params:
            del conn_params['isolation_level']
        if settings_dict['USER']:
            conn_params['user'] = settings_dict['USER']
        if settings_dict['PASSWORD']:
            conn_params['password'] = force_str(settings_dict['PASSWORD'])
        if settings_dict['HOST']:
            conn_params['host'] = settings_dict['HOST']
        if settings_dict['PORT']:
            conn_params['port'] = settings_dict['PORT']
        return conn_params

    def get_new_connection(self, conn_params):
        """Open and return a new psycopg2 connection."""
        return Database.connect(**conn_params)

    def init_connection_state(self):
        """Set client encoding, time zone and isolation level on a fresh
        connection.

        NOTE(review): `settings` is not imported explicitly in the visible
        code -- presumably provided by the star import from
        django.db.backends; confirm.
        """
        settings_dict = self.settings_dict
        self.connection.set_client_encoding('UTF8')
        tz = 'UTC' if settings.USE_TZ else settings_dict.get('TIME_ZONE')
        if tz:
            try:
                get_parameter_status = self.connection.get_parameter_status
            except AttributeError:
                # psycopg2 < 2.0.12 doesn't have get_parameter_status
                conn_tz = None
            else:
                conn_tz = get_parameter_status('TimeZone')
            if conn_tz != tz:
                self.connection.cursor().execute(
                    self.ops.set_time_zone_sql(), [tz])
                # Commit after setting the time zone (see #17062)
                self.connection.commit()
        self.connection.set_isolation_level(self.isolation_level)

    def create_cursor(self):
        """Return a cursor; under USE_TZ it only accepts UTC datetimes."""
        cursor = self.connection.cursor()
        cursor.tzinfo_factory = utc_tzinfo_factory if settings.USE_TZ else None
        return cursor

    def _set_isolation_level(self, isolation_level):
        """Apply a non-autocommit isolation level to the live connection."""
        assert isolation_level in range(1, 5)  # Use set_autocommit for level = 0
        if self.psycopg2_version >= (2, 4, 2):
            self.connection.set_session(isolation_level=isolation_level)
        else:
            # Older psycopg2 lacks set_session().
            self.connection.set_isolation_level(isolation_level)

    def _set_autocommit(self, autocommit):
        """Toggle driver-level autocommit, with a pre-2.4.2 fallback that
        emulates it via the AUTOCOMMIT isolation level."""
        with self.wrap_database_errors:
            if self.psycopg2_version >= (2, 4, 2):
                self.connection.autocommit = autocommit
            else:
                if autocommit:
                    level = psycopg2.extensions.ISOLATION_LEVEL_AUTOCOMMIT
                else:
                    level = self.isolation_level
                self.connection.set_isolation_level(level)

    def check_constraints(self, table_names=None):
        """
        To check constraints, we set constraints to immediate. Then, when, we're done we must ensure they
        are returned to deferred.
        """
        self.cursor().execute('SET CONSTRAINTS ALL IMMEDIATE')
        self.cursor().execute('SET CONSTRAINTS ALL DEFERRED')

    def is_usable(self):
        """Return True when the connection still answers a trivial query."""
        try:
            # Use a psycopg cursor directly, bypassing Django's utilities.
            self.connection.cursor().execute("SELECT 1")
        except Database.Error:
            return False
        else:
            return True

    @cached_property
    def psycopg2_version(self):
        # e.g. '2.4.5 (dt dec ...)' -> (2, 4, 5); cached per wrapper.
        version = psycopg2.__version__.split(' ', 1)[0]
        return tuple(int(v) for v in version.split('.'))

    @cached_property
    def pg_version(self):
        # Server version, fetched over a short-lived connection and cached.
        with self.temporary_connection():
            return get_version(self.connection)
|
unknown
|
codeparrot/codeparrot-clean
| ||
# coding=utf-8
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import unittest
from textwrap import dedent
from pants.backend.jvm.register import build_file_aliases as register_jvm
from pants.backend.jvm.targets.exclude import Exclude
from pants.backend.jvm.targets.jvm_binary import (Duplicate, JarRules, JvmBinary, ManifestEntries,
Skip)
from pants.base.address import BuildFileAddress
from pants.base.exceptions import TargetDefinitionException
from pants.base.payload_field import FingerprintedField
from pants.base.target import Target
from pants_test.base_test import BaseTest
class JarRulesTest(unittest.TestCase):
  """Unit tests for the JarRules / Duplicate / Skip rule objects."""

  def test_jar_rule(self):
    # reprs are part of the rule API surface (used in error messages).
    dup_rule = Duplicate('foo', Duplicate.REPLACE)
    self.assertEquals('Duplicate(apply_pattern=foo, action=REPLACE)',
                      repr(dup_rule))
    skip_rule = Skip('foo')
    self.assertEquals('Skip(apply_pattern=foo)', repr(skip_rule))

  def test_invalid_apply_pattern(self):
    # Non-string and non-compilable regex patterns are both rejected.
    with self.assertRaisesRegexp(ValueError, r'The supplied apply_pattern is not a string'):
      Skip(None)
    with self.assertRaisesRegexp(ValueError, r'The supplied apply_pattern is not a string'):
      Duplicate(None, Duplicate.SKIP)
    with self.assertRaisesRegexp(ValueError, r'The supplied apply_pattern: \) is not a valid'):
      Skip(r')')
    with self.assertRaisesRegexp(ValueError, r'The supplied apply_pattern: \) is not a valid'):
      Duplicate(r')', Duplicate.SKIP)

  def test_bad_action(self):
    with self.assertRaisesRegexp(ValueError, r'The supplied action must be one of'):
      Duplicate('foo', None)

  def test_duplicate_error(self):
    with self.assertRaisesRegexp(Duplicate.Error, r'Duplicate entry encountered for path foo'):
      raise Duplicate.Error('foo')

  def test_default(self):
    # Default rules all target META-INF entries.
    jar_rules = JarRules.default()
    self.assertTrue(4, len(jar_rules.rules))
    for rule in jar_rules.rules:
      self.assertTrue(rule.apply_pattern.pattern.startswith(r'^META-INF'))

  def test_set_bad_default(self):
    with self.assertRaisesRegexp(ValueError, r'The default rules must be a JarRules'):
      JarRules.set_default(None)
class JvmBinaryTest(BaseTest):
  """BUILD-file level tests for the jvm_binary target type."""

  @property
  def alias_groups(self):
    # Register the JVM backend aliases (jvm_binary, jar_rules, ...) for
    # the synthetic BUILD files below.
    return register_jvm()

  def test_simple(self):
    self.add_to_build_file('BUILD', dedent('''
        jvm_binary(name='foo',
          main='com.example.Foo',
          basename='foo-base',
        )
        '''))
    target = self.target('//:foo')
    # Every field should be mirrored on the payload for fingerprinting.
    self.assertEquals('com.example.Foo', target.main)
    self.assertEquals('com.example.Foo', target.payload.main)
    self.assertEquals('foo-base', target.basename)
    self.assertEquals('foo-base', target.payload.basename)
    self.assertEquals([], target.deploy_excludes)
    self.assertEquals([], target.payload.deploy_excludes)
    self.assertEquals(JarRules.default(), target.deploy_jar_rules)
    self.assertEquals(JarRules.default(), target.payload.deploy_jar_rules)
    self.assertEquals({}, target.payload.manifest_entries.entries);

  def test_default_base(self):
    # basename falls back to the target name when not given.
    self.add_to_build_file('BUILD', dedent('''
        jvm_binary(name='foo',
          main='com.example.Foo',
        )
        '''))
    target = self.target('//:foo')
    self.assertEquals('foo', target.basename)

  def test_deploy_jar_excludes(self):
    self.add_to_build_file('BUILD', dedent('''
        jvm_binary(name='foo',
          main='com.example.Foo',
          deploy_excludes=[exclude(org='example.com', name='foo-lib')],
        )
        '''))
    target = self.target('//:foo')
    self.assertEquals([Exclude(org='example.com', name='foo-lib')],
                      target.deploy_excludes)

  def test_deploy_jar_rules(self):
    self.add_to_build_file('BUILD', dedent('''
        jvm_binary(name='foo',
          main='com.example.Foo',
          deploy_jar_rules=jar_rules([Duplicate('foo', Duplicate.SKIP)],
                                     default_dup_action=Duplicate.FAIL)
        )
        '''))
    target = self.target('//:foo')
    jar_rules = target.deploy_jar_rules
    self.assertEquals(1, len(jar_rules.rules))
    self.assertEquals('foo', jar_rules.rules[0].apply_pattern.pattern)
    # Actions are sentinel objects, so compare reprs rather than values.
    self.assertEquals(repr(Duplicate.SKIP),
                      repr(jar_rules.rules[0].action))  # <object object at 0x...>
    self.assertEquals(Duplicate.FAIL, jar_rules.default_dup_action)

  def test_bad_source_declaration(self):
    # jvm_binary accepts at most one source file.
    build_file = self.add_to_build_file('BUILD', dedent('''
        jvm_binary(name='foo',
          main='com.example.Foo',
          source=['foo.py'],
        )
        '''))
    with self.assertRaisesRegexp(TargetDefinitionException,
                                 r'Invalid target JvmBinary.*foo.*source must be a single'):
      self.build_graph.inject_address_closure(BuildFileAddress(build_file, 'foo'))

  def test_bad_sources_declaration(self):
    # The plural `sources=` keyword is rejected outright.
    with self.assertRaisesRegexp(Target.IllegalArgument,
                                 r'jvm_binary only supports a single "source" argument'):
      self.make_target('foo:foo', target_type=JvmBinary, main='com.example.Foo', sources=['foo.py'])

  def test_bad_main_declaration(self):
    build_file = self.add_to_build_file('BUILD', dedent('''
        jvm_binary(name='bar',
          main=['com.example.Bar'],
        )
        '''))
    with self.assertRaisesRegexp(TargetDefinitionException,
                                 r'Invalid target JvmBinary.*bar.*main must be a fully'):
      self.build_graph.inject_address_closure(BuildFileAddress(build_file, 'bar'))

  def test_bad_jar_rules(self):
    build_file = self.add_to_build_file('BUILD', dedent('''
        jvm_binary(name='foo',
          main='com.example.Foo',
          deploy_jar_rules='invalid',
        )
        '''))
    with self.assertRaisesRegexp(TargetDefinitionException,
                                 r'Invalid target JvmBinary.*foo.*'
                                 r'deploy_jar_rules must be a JarRules specification. got str'):
      self.build_graph.inject_address_closure(BuildFileAddress(build_file, 'foo'))

  def _assert_fingerprints_not_equal(self, fields):
    # Pairwise-distinct fingerprints for every pair of distinct fields.
    for field in fields:
      for other_field in fields:
        if field == other_field:
          continue
        self.assertNotEquals(field.fingerprint(), other_field.fingerprint())

  def test_jar_rules_field(self):
    # Equal rule sets fingerprint identically; any difference in pattern,
    # action, rule count or default_dup_action changes the fingerprint.
    field1 = FingerprintedField(JarRules(rules=[Duplicate('foo', Duplicate.SKIP)]))
    field1_same = FingerprintedField(JarRules(rules=[Duplicate('foo', Duplicate.SKIP)]))
    field2 = FingerprintedField(JarRules(rules=[Duplicate('foo', Duplicate.CONCAT)]))
    field3 = FingerprintedField(JarRules(rules=[Duplicate('bar', Duplicate.SKIP)]))
    field4 = FingerprintedField(JarRules(rules=[Duplicate('foo', Duplicate.SKIP),
                                                Duplicate('bar', Duplicate.SKIP)]))
    field5 = FingerprintedField(JarRules(rules=[Duplicate('foo', Duplicate.SKIP), Skip('foo')]))
    field6 = FingerprintedField(JarRules(rules=[Duplicate('foo', Duplicate.SKIP)],
                                         default_dup_action=Duplicate.FAIL))
    field6_same = FingerprintedField(JarRules(rules=[Duplicate('foo', Duplicate.SKIP)],
                                              default_dup_action=Duplicate.FAIL))
    field7 = FingerprintedField(JarRules(rules=[Skip('foo')]))
    field8 = FingerprintedField(JarRules(rules=[Skip('bar')]))
    field8_same = FingerprintedField(JarRules(rules=[Skip('bar')]))
    self.assertEquals(field1.fingerprint(), field1_same.fingerprint())
    self.assertEquals(field6.fingerprint(), field6_same.fingerprint())
    self.assertEquals(field8.fingerprint(), field8_same.fingerprint())
    self._assert_fingerprints_not_equal([field1, field2, field3, field4, field5, field6, field7])

  def test_manifest_entries(self):
    self.add_to_build_file('BUILD', dedent('''
        jvm_binary(name='foo',
          main='com.example.Foo',
          manifest_entries= {
            'Foo-Field' : 'foo',
          }
        )
        '''))
    target = self.target('//:foo')
    self.assertTrue(isinstance(target.payload.manifest_entries, ManifestEntries))
    entries = target.payload.manifest_entries.entries
    self.assertEquals({ 'Foo-Field' : 'foo'}, entries)

  def test_manifest_not_dict(self):
    self.add_to_build_file('BUILD', dedent('''
        jvm_binary(name='foo',
          main='com.example.Foo',
          manifest_entries= 'foo',
        )
        '''))
    with self.assertRaisesRegexp(TargetDefinitionException,
                                 r'Invalid target JvmBinary\(BuildFileAddress\(.*BUILD\), foo\)\): '
                                 r'manifest_entries must be a dict. got str'):
      self.target('//:foo')

  def test_manifest_bad_key(self):
    self.add_to_build_file('BUILD', dedent('''
        jvm_binary(name='foo',
          main='com.example.Foo',
          manifest_entries= {
            jar(org='bad', name='bad', rev='bad') : 'foo',
          }
        )
        '''))
    with self.assertRaisesRegexp(ManifestEntries.ExpectedDictionaryError,
                                 r'entries must be dictionary of strings, got key bad-bad-bad type JarDependency'):
      self.target('//:foo')

  def test_manifest_entries_fingerprint(self):
    field1 = ManifestEntries()
    field2 = ManifestEntries({'Foo-Field' : 'foo'})
    field2_same = ManifestEntries({'Foo-Field' : 'foo'})
    field3 = ManifestEntries({'Foo-Field' : 'foo', 'Bar-Field' : 'bar'})
    self.assertEquals(field2.fingerprint(), field2_same.fingerprint())
    self._assert_fingerprints_not_equal([field1, field2, field3])
|
unknown
|
codeparrot/codeparrot-clean
| ||
// Copyright 2024 The Cockroach Authors.
//
// Use of this software is governed by the CockroachDB Software License
// included in the /LICENSE file.
package sql
import (
"context"
"github.com/cockroachdb/cockroach/pkg/docs"
"github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgcode"
"github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgerror"
"github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgnotice"
"github.com/cockroachdb/cockroach/pkg/sql/sem/tree"
"github.com/cockroachdb/errors"
"github.com/cockroachdb/redact"
)
// The below methods are ordered in alphabetical order. They represent statements
// which are UNIMPLEMENTED for the legacy schema changer.

// AlterPolicy is unimplemented in the legacy schema changer; it always returns
// a FeatureNotSupported error pointing at the declarative schema changer.
func (p *planner) AlterPolicy(ctx context.Context, n *tree.AlterPolicy) (planNode, error) {
	return nil, makeUnimplementedLegacyError("ALTER POLICY")
}
// CommentOnType is unimplemented in the legacy schema changer.
func (p *planner) CommentOnType(ctx context.Context, n *tree.CommentOnType) (planNode, error) {
	return nil, makeUnimplementedLegacyError("COMMENT ON TYPE")
}
// CreatePolicy is unimplemented in the legacy schema changer.
func (p *planner) CreatePolicy(ctx context.Context, n *tree.CreatePolicy) (planNode, error) {
	return nil, makeUnimplementedLegacyError("CREATE POLICY")
}
// CreateTrigger is unimplemented in the legacy schema changer.
func (p *planner) CreateTrigger(_ context.Context, _ *tree.CreateTrigger) (planNode, error) {
	return nil, makeUnimplementedLegacyError("CREATE TRIGGER")
}
// DropOwnedBy first verifies that schema changes are enabled at all, then
// reports DROP OWNED BY as unimplemented in the legacy schema changer.
func (p *planner) DropOwnedBy(ctx context.Context) (planNode, error) {
	if err := checkSchemaChangeEnabled(
		ctx,
		p.ExecCfg(),
		"DROP OWNED BY",
	); err != nil {
		return nil, err
	}
	return nil, makeUnimplementedLegacyError("DROP OWNED BY")
}
// DropPolicy is unimplemented in the legacy schema changer.
func (p *planner) DropPolicy(ctx context.Context, n *tree.DropPolicy) (planNode, error) {
	return nil, makeUnimplementedLegacyError("DROP POLICY")
}
// DropTrigger is unimplemented in the legacy schema changer.
func (p *planner) DropTrigger(_ context.Context, _ *tree.DropTrigger) (planNode, error) {
	return nil, makeUnimplementedLegacyError("DROP TRIGGER")
}
// makeUnimplementedLegacyError creates an error message with a hint and detail for a statement that
// is only implemented in the declarative schema changer and not in the legacy schema changer.
func makeUnimplementedLegacyError(stmtSyntax redact.SafeString) error {
	baseErr := pgerror.Newf(
		pgcode.FeatureNotSupported,
		"%s is only implemented in the declarative schema changer", stmtSyntax,
	)
	hint := "This error may be happening due to running it in a multi-statement transaction." +
		" Try sending each schema change statement in its own implicit transaction."
	detail := " See the documentation for additional details:" +
		docs.URL("online-schema-changes#declarative-schema-changer")
	return errors.WithDetail(errors.WithHint(baseErr, hint), detail)
}
// AlterTableSetLogged set table as unlogged or logged.
// No-op since unlogged tables are not supported: a notice is buffered for the
// client and a zeroNode (no rows, no work) is returned.
func (p *planner) AlterTableSetLogged(
	ctx context.Context, n *tree.AlterTableSetLogged,
) (planNode, error) {
	operation := redact.SafeString("LOGGED")
	if !n.IsLogged {
		operation = redact.SafeString("UNLOGGED")
	}
	p.BufferClientNotice(
		ctx, pgnotice.Newf(
			"SET %s is not supported and has no effect",
			operation,
		),
	)
	return &zeroNode{}, nil
}
|
go
|
github
|
https://github.com/cockroachdb/cockroach
|
pkg/sql/unimplemented.go
|
"""
Generic thread pool class. Modeled after Java's ThreadPoolExecutor.
Please note that this ThreadPool does *not* fully implement the PEP 3148
ThreadPool!
"""
from threading import Thread, Lock, currentThread
from weakref import ref
import logging
import atexit
try:
from queue import Queue, Empty
except ImportError:
from Queue import Queue, Empty
logger = logging.getLogger(__name__)

# Weak references to every live ThreadPool so _shutdown_all() can stop them
# at interpreter exit without itself keeping any pool alive.
_threadpools = set()

# Worker threads are daemonic in order to let the interpreter exit without
# an explicit shutdown of the thread pool. The following trick is necessary
# to allow worker threads to finish cleanly.
def _shutdown_all():
    """atexit hook: shut down every ThreadPool that is still alive."""
    for pool_ref in tuple(_threadpools):
        pool = pool_ref()
        if pool:
            pool.shutdown()

atexit.register(_shutdown_all)
class ThreadPool(object):
    """A small daemonic worker-thread pool (subset of PEP 3148's model)."""

    def __init__(self, core_threads=0, max_threads=20, keepalive=1):
        """
        :param core_threads: maximum number of persistent threads in the pool
        :param max_threads: maximum number of total threads in the pool
        :param keepalive: seconds to keep non-core worker threads waiting
            for new tasks
        """
        self.core_threads = core_threads
        self.max_threads = max(max_threads, core_threads, 1)
        self.keepalive = keepalive
        self._queue = Queue()
        self._threads_lock = Lock()
        self._threads = set()
        self._shutdown = False
        _threadpools.add(ref(self))
        logger.info('Started thread pool with %d core threads and %s maximum '
                    'threads', core_threads, max_threads or 'unlimited')

    def _adjust_threadcount(self):
        """Spawn another worker if we are still below max_threads."""
        with self._threads_lock:
            if self.num_threads < self.max_threads:
                self._add_thread(self.num_threads < self.core_threads)

    def _add_thread(self, core):
        """Start one worker thread; core workers never time out."""
        t = Thread(target=self._run_jobs, args=(core,))
        # Fix: `Thread.daemon` attribute instead of deprecated setDaemon().
        t.daemon = True
        # Fix: register the thread *before* starting it so a fast-exiting
        # worker can never try to remove itself before it was added.
        self._threads.add(t)
        t.start()

    def _run_jobs(self, core):
        """Worker loop: run queued (func, args, kwargs) tuples until
        shutdown or, for non-core threads, until keepalive expires."""
        logger.debug('Started worker thread')
        block = True
        timeout = None
        if not core:
            block = self.keepalive > 0
            timeout = self.keepalive
        while True:
            try:
                func, args, kwargs = self._queue.get(block, timeout)
            except Empty:
                break
            if self._shutdown:
                # (None, None, None) sentinels land here too.
                break
            try:
                func(*args, **kwargs)
            # Fix: catch Exception, not a bare except, so KeyboardInterrupt
            # and SystemExit are not silently swallowed by workers.
            except Exception:
                logger.exception('Error in worker thread')
        with self._threads_lock:
            # discard() instead of remove(): tolerate double-removal races.
            self._threads.discard(currentThread())
        logger.debug('Exiting worker thread')

    @property
    def num_threads(self):
        """Current number of live (registered) worker threads."""
        return len(self._threads)

    def submit(self, func, *args, **kwargs):
        """Queue ``func(*args, **kwargs)`` for execution on a worker.

        :raises RuntimeError: if the pool has been shut down.
        """
        if self._shutdown:
            raise RuntimeError('Cannot schedule new tasks after shutdown')
        self._queue.put((func, args, kwargs))
        self._adjust_threadcount()

    def shutdown(self, wait=True):
        """Stop accepting work, wake every worker, optionally join them."""
        if self._shutdown:
            return
        # Fix: use the module logger (was logging.info on the root logger,
        # inconsistent with every other log call in this module).
        logger.info('Shutting down thread pool')
        self._shutdown = True
        _threadpools.discard(ref(self))
        with self._threads_lock:
            # One sentinel per worker unblocks every blocking get().
            for _ in range(self.num_threads):
                self._queue.put((None, None, None))
        if wait:
            with self._threads_lock:
                threads = tuple(self._threads)
            for thread in threads:
                thread.join()

    def __repr__(self):
        if self.max_threads:
            threadcount = '%d/%d' % (self.num_threads, self.max_threads)
        else:
            threadcount = '%d' % self.num_threads
        return '<ThreadPool at %x; threads=%s>' % (id(self), threadcount)
|
unknown
|
codeparrot/codeparrot-clean
| ||
#pragma once
#include <c10/util/BFloat16.h>
#include <c10/util/Half.h>
C10_CLANG_DIAGNOSTIC_PUSH()
#if C10_CLANG_HAS_WARNING("-Wimplicit-float-conversion")
C10_CLANG_DIAGNOSTIC_IGNORE("-Wimplicit-float-conversion")
#endif
namespace c10 {
// Type trait: true exactly for the 16-bit "reduced precision" float types
// c10::Half and c10::BFloat16.
template <typename T>
struct is_reduced_floating_point
    : std::integral_constant<
          bool,
          std::is_same_v<T, c10::Half> || std::is_same_v<T, c10::BFloat16>> {};

// Variable-template shorthand for the trait above.
template <typename T>
constexpr bool is_reduced_floating_point_v =
    is_reduced_floating_point<T>::value;
} // namespace c10
namespace std {

// The overloads below give c10::Half and c10::BFloat16 the usual <cmath>
// surface by widening the argument(s) to float, calling the float overload
// and narrowing the result back to T. Each one is SFINAE-restricted to the
// reduced-precision float types, so ordinary std:: overloads are untouched.

#if !defined(FBCODE_CAFFE2) && !defined(C10_NODEPRECATED)
using c10::is_reduced_floating_point;
using c10::is_reduced_floating_point_v;
#endif // !defined(FBCODE_CAFFE2) && !defined(C10_NODEPRECATED)

template <
    typename T,
    typename std::enable_if_t<c10::is_reduced_floating_point_v<T>, int> = 0>
inline T acos(T a) {
  return std::acos(float(a));
}
template <
    typename T,
    typename std::enable_if_t<c10::is_reduced_floating_point_v<T>, int> = 0>
inline T asin(T a) {
  return std::asin(float(a));
}
template <
    typename T,
    typename std::enable_if_t<c10::is_reduced_floating_point_v<T>, int> = 0>
inline T atan(T a) {
  return std::atan(float(a));
}
template <
    typename T,
    typename std::enable_if_t<c10::is_reduced_floating_point_v<T>, int> = 0>
inline T atanh(T a) {
  return std::atanh(float(a));
}
template <
    typename T,
    typename std::enable_if_t<c10::is_reduced_floating_point_v<T>, int> = 0>
inline T erf(T a) {
  return std::erf(float(a));
}
template <
    typename T,
    typename std::enable_if_t<c10::is_reduced_floating_point_v<T>, int> = 0>
inline T erfc(T a) {
  return std::erfc(float(a));
}
template <
    typename T,
    typename std::enable_if_t<c10::is_reduced_floating_point_v<T>, int> = 0>
inline T exp(T a) {
  return std::exp(float(a));
}
template <
    typename T,
    typename std::enable_if_t<c10::is_reduced_floating_point_v<T>, int> = 0>
inline T expm1(T a) {
  return std::expm1(float(a));
}
// Note: returns bool, matching the std::isfinite(float) contract.
template <
    typename T,
    typename std::enable_if_t<c10::is_reduced_floating_point_v<T>, int> = 0>
inline bool isfinite(T a) {
  return std::isfinite(float(a));
}
template <
    typename T,
    typename std::enable_if_t<c10::is_reduced_floating_point_v<T>, int> = 0>
inline T log(T a) {
  return std::log(float(a));
}
template <
    typename T,
    typename std::enable_if_t<c10::is_reduced_floating_point_v<T>, int> = 0>
inline T log10(T a) {
  return std::log10(float(a));
}
template <
    typename T,
    typename std::enable_if_t<c10::is_reduced_floating_point_v<T>, int> = 0>
inline T log1p(T a) {
  return std::log1p(float(a));
}
template <
    typename T,
    typename std::enable_if_t<c10::is_reduced_floating_point_v<T>, int> = 0>
inline T log2(T a) {
  return std::log2(float(a));
}
template <
    typename T,
    typename std::enable_if_t<c10::is_reduced_floating_point_v<T>, int> = 0>
inline T ceil(T a) {
  return std::ceil(float(a));
}
template <
    typename T,
    typename std::enable_if_t<c10::is_reduced_floating_point_v<T>, int> = 0>
inline T cos(T a) {
  return std::cos(float(a));
}
template <
    typename T,
    typename std::enable_if_t<c10::is_reduced_floating_point_v<T>, int> = 0>
inline T floor(T a) {
  return std::floor(float(a));
}
template <
    typename T,
    typename std::enable_if_t<c10::is_reduced_floating_point_v<T>, int> = 0>
inline T nearbyint(T a) {
  return std::nearbyint(float(a));
}
template <
    typename T,
    typename std::enable_if_t<c10::is_reduced_floating_point_v<T>, int> = 0>
inline T sin(T a) {
  return std::sin(float(a));
}
template <
    typename T,
    typename std::enable_if_t<c10::is_reduced_floating_point_v<T>, int> = 0>
inline T tan(T a) {
  return std::tan(float(a));
}
template <
    typename T,
    typename std::enable_if_t<c10::is_reduced_floating_point_v<T>, int> = 0>
inline T sinh(T a) {
  return std::sinh(float(a));
}
template <
    typename T,
    typename std::enable_if_t<c10::is_reduced_floating_point_v<T>, int> = 0>
inline T cosh(T a) {
  return std::cosh(float(a));
}
template <
    typename T,
    typename std::enable_if_t<c10::is_reduced_floating_point_v<T>, int> = 0>
inline T tanh(T a) {
  return std::tanh(float(a));
}
template <
    typename T,
    typename std::enable_if_t<c10::is_reduced_floating_point_v<T>, int> = 0>
inline T trunc(T a) {
  return std::trunc(float(a));
}
template <
    typename T,
    typename std::enable_if_t<c10::is_reduced_floating_point_v<T>, int> = 0>
inline T lgamma(T a) {
  return std::lgamma(float(a));
}
template <
    typename T,
    typename std::enable_if_t<c10::is_reduced_floating_point_v<T>, int> = 0>
inline T sqrt(T a) {
  return std::sqrt(float(a));
}
// Reciprocal square root, computed in float as 1/sqrt(a).
template <
    typename T,
    typename std::enable_if_t<c10::is_reduced_floating_point_v<T>, int> = 0>
inline T rsqrt(T a) {
  return 1.0 / std::sqrt(float(a));
}
template <
    typename T,
    typename std::enable_if_t<c10::is_reduced_floating_point_v<T>, int> = 0>
inline T abs(T a) {
  return std::abs(float(a));
}
// MSVC + nvcc: narrow the double exponent to float before calling pow to
// keep overload resolution unambiguous on that toolchain.
#if defined(_MSC_VER) && defined(__CUDACC__)
template <
    typename T,
    typename std::enable_if_t<c10::is_reduced_floating_point_v<T>, int> = 0>
inline T pow(T a, double b) {
  return std::pow(float(a), float(b));
}
#else
template <
    typename T,
    typename std::enable_if_t<c10::is_reduced_floating_point_v<T>, int> = 0>
inline T pow(T a, double b) {
  return std::pow(float(a), b);
}
#endif
template <
    typename T,
    typename std::enable_if_t<c10::is_reduced_floating_point_v<T>, int> = 0>
inline T pow(T a, T b) {
  return std::pow(float(a), float(b));
}
template <
    typename T,
    typename std::enable_if_t<c10::is_reduced_floating_point_v<T>, int> = 0>
inline T fmod(T a, T b) {
  return std::fmod(float(a), float(b));
}

/*
  The following function is inspired from the implementation in `musl`
  Link to License: https://git.musl-libc.org/cgit/musl/tree/COPYRIGHT
  ----------------------------------------------------------------------
  Copyright © 2005-2020 Rich Felker, et al.

  Permission is hereby granted, free of charge, to any person obtaining
  a copy of this software and associated documentation files (the
  "Software"), to deal in the Software without restriction, including
  without limitation the rights to use, copy, modify, merge, publish,
  distribute, sublicense, and/or sell copies of the Software, and to
  permit persons to whom the Software is furnished to do so, subject to
  the following conditions:

  The above copyright notice and this permission notice shall be
  included in all copies or substantial portions of the Software.

  THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
  EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
  MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
  IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
  CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
  TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
  SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
  ----------------------------------------------------------------------
 */
// Bit-level nextafter for 16-bit float types: steps the raw 16-bit
// representation by one ULP toward `to`, handling NaN, equality, signed
// zeros and sign changes exactly like musl's float implementation.
template <
    typename T,
    typename std::enable_if_t<c10::is_reduced_floating_point_v<T>, int> = 0>
C10_HOST_DEVICE inline T nextafter(T from, T to) {
  // Reference:
  // https://git.musl-libc.org/cgit/musl/tree/src/math/nextafter.c
  using int_repr_t = uint16_t;
  constexpr uint8_t bits = 16;
  union {
    T f;
    int_repr_t i;
  } ufrom = {from}, uto = {to};

  // get a mask to get the sign bit i.e. MSB
  int_repr_t sign_mask = int_repr_t{1} << (bits - 1);

  // short-circuit: if either is NaN, return NaN
  if (from != from || to != to) {
    return from + to;
  }

  // short-circuit: if they are exactly the same.
  if (ufrom.i == uto.i) {
    return from;
  }

  // mask the sign-bit to zero i.e. positive
  // equivalent to abs(x)
  int_repr_t abs_from = ufrom.i & ~sign_mask;
  int_repr_t abs_to = uto.i & ~sign_mask;
  if (abs_from == 0) {
    // if both are zero but with different sign,
    // preserve the sign of `to`.
    if (abs_to == 0) {
      return to;
    }
    // smallest subnormal with sign of `to`.
    ufrom.i = (uto.i & sign_mask) | int_repr_t{1};
    return ufrom.f;
  }

  // if abs(from) > abs(to) or sign(from) != sign(to)
  if (abs_from > abs_to || ((ufrom.i ^ uto.i) & sign_mask)) {
    ufrom.i--;
  } else {
    ufrom.i++;
  }

  return ufrom.f;
}
} // namespace std
C10_CLANG_DIAGNOSTIC_POP()
|
c
|
github
|
https://github.com/pytorch/pytorch
|
c10/util/BFloat16-math.h
|
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
from telemetry.page import page_measurement_results
class BlockPageMeasurementResults(
    page_measurement_results.PageMeasurementResults):
  """Writes each page's results as a block of text lines to an output file.

  Output format per page: a 'name: <page name>' line followed by one
  'measurement (units): value' line per measurement (sorted by name),
  terminated by a blank line.
  """

  def __init__(self, output_file):
    super(BlockPageMeasurementResults, self).__init__()
    # File-like object; only write()/flush() are used.
    self._output_file = output_file

  def DidMeasurePage(self):
    """Flush the just-measured page's values to the output file."""
    page_values = self.values_for_current_page
    if not page_values.values:
      # Do not output if no results were added on this page.
      super(BlockPageMeasurementResults, self).DidMeasurePage()
      return
    lines = ['name: %s' %
             self.values_for_current_page.page.display_name]
    # Sort for deterministic, diffable output.
    sorted_measurement_names = page_values.measurement_names
    sorted_measurement_names.sort()
    for measurement_name in sorted_measurement_names:
      value = page_values.FindValueByMeasurementName(measurement_name)
      lines.append('%s (%s): %s' %
                   (measurement_name,
                    value.units,
                    value.output_value))
    for line in lines:
      self._output_file.write(line)
      self._output_file.write(os.linesep)
    # Blank line separates pages.
    self._output_file.write(os.linesep)
    self._output_file.flush()
    super(BlockPageMeasurementResults, self).DidMeasurePage()
|
unknown
|
codeparrot/codeparrot-clean
| ||
from django.conf.urls.defaults import *
from django.contrib import messages
from django.core.urlresolvers import reverse
from django.http import HttpResponseRedirect, HttpResponse
from django.shortcuts import render_to_response
from django.template import RequestContext, Template
def add(request, message_type):
    """Test view: add each POSTed message at the given level, then redirect
    to the show view.

    ``message_type`` is one of debug/info/success/warning/error and is used
    to look up the corresponding messages API shortcut.
    """
    # don't default to False here, because we want to test that it defaults
    # to False if unspecified
    fail_silently = request.POST.get('fail_silently', None)
    for msg in request.POST.getlist('messages'):
        if fail_silently is not None:
            getattr(messages, message_type)(request, msg,
                                            fail_silently=fail_silently)
        else:
            getattr(messages, message_type)(request, msg)
    show_url = reverse('django.contrib.messages.tests.urls.show')
    return HttpResponseRedirect(show_url)
def show(request):
    """Test view: render any stored messages as a <ul class="messages">."""
    t = Template("""{% if messages %}
<ul class="messages">
{% for message in messages %}
<li{% if message.tags %} class="{{ message.tags }}"{% endif %}>
{{ message }}
</li>
{% endfor %}
</ul>
{% endif %}""")
    # RequestContext runs the messages context processor, which consumes
    # the stored messages.
    return HttpResponse(t.render(RequestContext(request)))
# URLconf used by the messages framework tests: one endpoint per message
# level for adding, plus the display endpoint.
urlpatterns = patterns('',
    ('^add/(debug|info|success|warning|error)/$', add),
    ('^show/$', show),
)
|
unknown
|
codeparrot/codeparrot-clean
| ||
from __future__ import unicode_literals
from textx.metamodel import metamodel_from_str
call_counter = 0
grammar1 = """
foo:
'foo' m_formula = Formula
;
Formula:
( values=FormulaExpression values='+' ( values=FormulaExpression)* )
;
FormulaExpression:
values=bar
;
bar:
m_id=/[a-f0-9]+/
;
"""
grammar2 = u"""
foo:
'foo' m_formula = Formula
;
Formula:
( values=FormulaExpression ( values=FormulaExpression)* )
;
FormulaExpression:
values=bar
;
bar:
m_id=/[a-f0-9]+/
;
"""
grammar3 = u"""
foo:
'foo' m_formula = Formula
;
Formula:
( values=FormulaExpression values='+' ( values=FormulaExpression)* )
;
FormulaExpression:
values=/[a-f0-9]+/
;
"""
def default_processor(obj):
    """Object processor registered for every rule: count the invocation in
    the module-global ``call_counter`` and log the processed class."""
    global call_counter
    call_counter += 1
    print("PROCESSING " + str(obj.__class__.__name__))
def parse_str(grammar, lola_str):
    """Parse ``lola_str`` with a metamodel built from ``grammar``.

    Builds a case-insensitive metamodel, registers the counting object
    processor for every rule, and returns the parsed model.
    """
    # (removed: `lola_str = lola_str`, a dead no-op self-assignment)
    obj_processors = {
        "foo": default_processor,
        "Formula": default_processor,
        "FormulaExpression": default_processor,
        "bar": default_processor,
    }
    meta_model = metamodel_from_str(grammar, ignore_case=True,
                                    auto_init_attributes=False)
    meta_model.register_obj_processors(obj_processors)
    return meta_model.model_from_str(lola_str)
def test_issue_80_object_processors():
    """Object processors must fire once per constructed object (issue #80)."""
    global call_counter
    cases = [
        (grammar1, "foo a323 + a111", 6),
        (grammar2, "foo a323 a111", 6),
        (grammar3, "foo a323 + a111", 4),
    ]
    for grammar, model_str, expected_calls in cases:
        call_counter = 0
        parse_str(grammar, model_str)
        assert call_counter == expected_calls
|
unknown
|
codeparrot/codeparrot-clean
| ||
# -*- coding: utf-8 -*-
##############################################################################
#
# Author: Laurent Mignon
# Copyright 2014 'ACSONE SA/NV'
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.modules.registry import RegistryManager
from openerp.osv import orm, fields
from openerp import SUPERUSER_ID
import openerp.exceptions
from openerp.addons.auth_from_http_remote_user import utils
class res_users(orm.Model):
    """Extend res.users so an SSO key can stand in for the password when
    authentication is delegated to the HTTP remote user."""
    _inherit = 'res.users'

    _columns = {
        # Server-generated key accepted in place of the password for SSO.
        'sso_key': fields.char('SSO Key', size=utils.KEY_LENGTH,
                               readonly=True),
    }

    def copy(self, cr, uid, rid, defaults=None, context=None):
        """Never carry the SSO key over to a duplicated user record."""
        defaults = defaults or {}
        defaults['sso_key'] = False
        return super(res_users, self).copy(cr, uid, rid, defaults, context)

    def check_credentials(self, cr, uid, password):
        """Accept either the regular password or the user's SSO key.

        Raises openerp.exceptions.AccessDenied when neither matches.
        """
        try:
            return super(res_users, self).check_credentials(cr, uid, password)
        except openerp.exceptions.AccessDenied:
            # Password check failed: fall back to matching against sso_key.
            # Searched as SUPERUSER because the field is readonly for users.
            res = self.search(cr, SUPERUSER_ID, [('id', '=', uid),
                                                 ('sso_key', '=', password)])
            if not res:
                raise openerp.exceptions.AccessDenied()

    def check(self, db, uid, passwd):
        """Verify (uid, passwd) for RPC access, allowing the SSO key too."""
        try:
            return super(res_users, self).check(db, uid, passwd)
        except openerp.exceptions.AccessDenied:
            if not passwd:
                raise
            # Direct SQL check of the SSO key; avoids the ORM password path.
            with RegistryManager.get(db).cursor() as cr:
                cr.execute('''SELECT COUNT(1)
                              FROM res_users
                              WHERE id=%s
                              AND sso_key=%s
                              AND active=%s''', (int(uid), passwd, True))
                if not cr.fetchone()[0]:
                    raise
            # Cache the accepted credential like the standard check() does.
            self._uid_cache.setdefault(db, {})[uid] = passwd
|
unknown
|
codeparrot/codeparrot-clean
| ||
#!/usr/bin/env python
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
from cmdutils import *
import os
def run():
    """Run the avmshell vmbase/concurrency selftest and check its output.

    Each entry in ``expectedout`` is a regex the harness matches against the
    shell output; brackets are escaped because the selftest prints literal
    Python-list-style lines.  Raw strings are used so the ``\\[`` escapes are
    valid on Python 3 (identical runtime values, no SyntaxWarning).
    """
    r = RunTestLib()
    r.run_test('VMbaseConcurrency(-Dselftest=vmbase,concurrency)', '%s -Dselftest=vmbase,concurrency' % r.avm, expectedcode=0,
               expectedout=[
                   r"\['start', 'vmbase', 'concurrency'\]",
                   r"\['test', 'vmbase', 'concurrency', 'mutexes'\]",
                   r"\['pass', 'vmbase', 'concurrency', 'mutexes'\]",
                   r"\['test', 'vmbase', 'concurrency', 'conditions'\]",
                   r"\['pass', 'vmbase', 'concurrency', 'conditions'\]",
                   r"\['test', 'vmbase', 'concurrency', 'atomic_counter'\]",
                   r"\['pass', 'vmbase', 'concurrency', 'atomic_counter'\]",
                   r"\['test', 'vmbase', 'concurrency', 'compare_and_swap_without_barrier'\]",
                   r"\['pass', 'vmbase', 'concurrency', 'compare_and_swap_without_barrier'\]",
                   r"\['test', 'vmbase', 'concurrency', 'compare_and_swap_with_barrier'\]",
                   r"\['pass', 'vmbase', 'concurrency', 'compare_and_swap_with_barrier'\]",
                   r"\['test', 'vmbase', 'concurrency', 'memory_barrier'\]",
                   r"\['pass', 'vmbase', 'concurrency', 'memory_barrier'\]",
                   r"\['test', 'vmbase', 'concurrency', 'condition_with_wait'\]",
                   r"\['pass', 'vmbase', 'concurrency', 'condition_with_wait'\]",
                   r"\['test', 'vmbase', 'concurrency', 'sleep'\]",
                   r"\['pass', 'vmbase', 'concurrency', 'sleep'\]",
                   r"\['test', 'vmbase', 'concurrency', 'vmthreadlocal'\]",
                   r"\['pass', 'vmbase', 'concurrency', 'vmthreadlocal'\]",
                   r"\['test', 'vmbase', 'concurrency', 'join'\]",
                   r"\['pass', 'vmbase', 'concurrency', 'join'\]",
                   r"\['end', 'vmbase', 'concurrency'\]"
               ]
               )
if __name__ == '__main__':
    # run() builds its own RunTestLib; the previously-constructed, unused
    # instance here was dead code and has been removed.
    run()
|
unknown
|
codeparrot/codeparrot-clean
| ||
// SPDX-License-Identifier: GPL-2.0
#include <linux/fs.h>
#include <linux/security.h>
#include <linux/fscrypt.h>
#include <linux/fsnotify.h>
#include <linux/fileattr.h>
#include <linux/export.h>
#include <linux/syscalls.h>
#include <linux/namei.h>
#include "internal.h"
/**
 * fileattr_fill_xflags - initialize fileattr with xflags
 * @fa: fileattr pointer
 * @xflags: FS_XFLAG_* flags
 *
 * Set ->fsx_xflags, ->fsx_valid and ->flags (translated xflags). All
 * other fields are zeroed.
 */
void fileattr_fill_xflags(struct file_kattr *fa, u32 xflags)
{
	/* FS_XFLAG_* -> FS_*_FL translation table. */
	static const struct {
		u32 xflag;
		u32 flag;
	} xlate[] = {
		{ FS_XFLAG_IMMUTABLE,	FS_IMMUTABLE_FL },
		{ FS_XFLAG_APPEND,	FS_APPEND_FL },
		{ FS_XFLAG_SYNC,	FS_SYNC_FL },
		{ FS_XFLAG_NOATIME,	FS_NOATIME_FL },
		{ FS_XFLAG_NODUMP,	FS_NODUMP_FL },
		{ FS_XFLAG_DAX,		FS_DAX_FL },
		{ FS_XFLAG_PROJINHERIT,	FS_PROJINHERIT_FL },
	};
	size_t i;

	memset(fa, 0, sizeof(*fa));
	fa->fsx_valid = true;
	fa->fsx_xflags = xflags;
	for (i = 0; i < sizeof(xlate) / sizeof(xlate[0]); i++)
		if (fa->fsx_xflags & xlate[i].xflag)
			fa->flags |= xlate[i].flag;
}
EXPORT_SYMBOL(fileattr_fill_xflags);
/**
 * fileattr_fill_flags - initialize fileattr with flags
 * @fa: fileattr pointer
 * @flags: FS_*_FL flags
 *
 * Set ->flags, ->flags_valid and ->fsx_xflags (translated flags).
 * All other fields are zeroed.
 */
void fileattr_fill_flags(struct file_kattr *fa, u32 flags)
{
	/* FS_*_FL -> FS_XFLAG_* translation table. */
	static const struct {
		u32 flag;
		u32 xflag;
	} xlate[] = {
		{ FS_SYNC_FL,		FS_XFLAG_SYNC },
		{ FS_IMMUTABLE_FL,	FS_XFLAG_IMMUTABLE },
		{ FS_APPEND_FL,		FS_XFLAG_APPEND },
		{ FS_NODUMP_FL,		FS_XFLAG_NODUMP },
		{ FS_NOATIME_FL,	FS_XFLAG_NOATIME },
		{ FS_DAX_FL,		FS_XFLAG_DAX },
		{ FS_PROJINHERIT_FL,	FS_XFLAG_PROJINHERIT },
	};
	size_t i;

	memset(fa, 0, sizeof(*fa));
	fa->flags_valid = true;
	fa->flags = flags;
	for (i = 0; i < sizeof(xlate) / sizeof(xlate[0]); i++)
		if (fa->flags & xlate[i].flag)
			fa->fsx_xflags |= xlate[i].xflag;
}
EXPORT_SYMBOL(fileattr_fill_flags);
/**
 * vfs_fileattr_get - retrieve miscellaneous file attributes
 * @dentry: the object to retrieve from
 * @fa: fileattr pointer
 *
 * Call i_op->fileattr_get() callback, if exists.
 *
 * Return: 0 on success, or a negative error on failure.
 */
int vfs_fileattr_get(struct dentry *dentry, struct file_kattr *fa)
{
	struct inode *inode = d_inode(dentry);
	int error;

	/* -ENOIOCTLCMD tells callers the filesystem has no fileattr support. */
	if (!inode->i_op->fileattr_get)
		return -ENOIOCTLCMD;

	/* Let the security module veto the read before asking the fs. */
	error = security_inode_file_getattr(dentry, fa);
	if (error)
		return error;

	return inode->i_op->fileattr_get(dentry, fa);
}
EXPORT_SYMBOL(vfs_fileattr_get);
/*
 * Translate an in-kernel file_kattr into the struct file_attr ABI exposed
 * by the file_getattr() syscall.  Only FS_XFLAGS_MASK bits are reported.
 */
static void fileattr_to_file_attr(const struct file_kattr *fa,
				  struct file_attr *fattr)
{
	__u32 mask = FS_XFLAGS_MASK;

	/* Zero everything (padding included) before the struct heads to
	 * userspace via copy_struct_to_user(). */
	memset(fattr, 0, sizeof(struct file_attr));
	fattr->fa_xflags = fa->fsx_xflags & mask;
	fattr->fa_extsize = fa->fsx_extsize;
	fattr->fa_nextents = fa->fsx_nextents;
	fattr->fa_projid = fa->fsx_projid;
	fattr->fa_cowextsize = fa->fsx_cowextsize;
}
/**
 * copy_fsxattr_to_user - copy fsxattr to userspace.
 * @fa: fileattr pointer
 * @ufa: fsxattr user pointer
 *
 * Return: 0 on success, or -EFAULT on failure.
 */
int copy_fsxattr_to_user(const struct file_kattr *fa, struct fsxattr __user *ufa)
{
	struct fsxattr xfa;
	__u32 mask = FS_XFLAGS_MASK;

	/* memset (not an initializer) so padding bytes are zeroed too and no
	 * kernel stack data leaks through copy_to_user(). */
	memset(&xfa, 0, sizeof(xfa));
	xfa.fsx_xflags = fa->fsx_xflags & mask;
	xfa.fsx_extsize = fa->fsx_extsize;
	xfa.fsx_nextents = fa->fsx_nextents;
	xfa.fsx_projid = fa->fsx_projid;
	xfa.fsx_cowextsize = fa->fsx_cowextsize;

	if (copy_to_user(ufa, &xfa, sizeof(xfa)))
		return -EFAULT;

	return 0;
}
EXPORT_SYMBOL(copy_fsxattr_to_user);
/*
 * Translate the struct file_attr syscall ABI into an in-kernel file_kattr.
 * Rejects xflags outside FS_XFLAGS_MASK with -EINVAL.
 *
 * NOTE(review): fa_nextents is deliberately not copied in — it appears to be
 * a read-only output field (see fileattr_to_file_attr()); confirm.
 */
static int file_attr_to_fileattr(const struct file_attr *fattr,
				 struct file_kattr *fa)
{
	__u64 mask = FS_XFLAGS_MASK;

	if (fattr->fa_xflags & ~mask)
		return -EINVAL;

	fileattr_fill_xflags(fa, fattr->fa_xflags);
	/* Strip bits userspace is never allowed to set directly. */
	fa->fsx_xflags &= ~FS_XFLAG_RDONLY_MASK;
	fa->fsx_extsize = fattr->fa_extsize;
	fa->fsx_projid = fattr->fa_projid;
	fa->fsx_cowextsize = fattr->fa_cowextsize;

	return 0;
}
/*
 * Read a struct fsxattr from userspace into a file_kattr.
 * Unknown xflag bits yield -EOPNOTSUPP; a faulting pointer yields -EFAULT.
 */
static int copy_fsxattr_from_user(struct file_kattr *fa,
				  struct fsxattr __user *ufa)
{
	struct fsxattr xfa;
	__u32 mask = FS_XFLAGS_MASK;

	if (copy_from_user(&xfa, ufa, sizeof(xfa)))
		return -EFAULT;

	if (xfa.fsx_xflags & ~mask)
		return -EOPNOTSUPP;

	fileattr_fill_xflags(fa, xfa.fsx_xflags);
	/* Strip bits userspace is never allowed to set directly. */
	fa->fsx_xflags &= ~FS_XFLAG_RDONLY_MASK;
	fa->fsx_extsize = xfa.fsx_extsize;
	fa->fsx_nextents = xfa.fsx_nextents;
	fa->fsx_projid = xfa.fsx_projid;
	fa->fsx_cowextsize = xfa.fsx_cowextsize;

	return 0;
}
/*
 * Generic function to check FS_IOC_FSSETXATTR/FS_IOC_SETFLAGS values and reject
 * any invalid configurations.
 *
 * Note: must be called with inode lock held.
 */
static int fileattr_set_prepare(struct inode *inode,
				const struct file_kattr *old_ma,
				struct file_kattr *fa)
{
	int err;

	/*
	 * The IMMUTABLE and APPEND_ONLY flags can only be changed by
	 * the relevant capability.
	 */
	if ((fa->flags ^ old_ma->flags) & (FS_APPEND_FL | FS_IMMUTABLE_FL) &&
	    !capable(CAP_LINUX_IMMUTABLE))
		return -EPERM;

	err = fscrypt_prepare_setflags(inode, old_ma->flags, fa->flags);
	if (err)
		return err;

	/*
	 * Project Quota ID state is only allowed to change from within the init
	 * namespace. Enforce that restriction only if we are trying to change
	 * the quota ID state. Everything else is allowed in user namespaces.
	 */
	if (current_user_ns() != &init_user_ns) {
		if (old_ma->fsx_projid != fa->fsx_projid)
			return -EINVAL;
		if ((old_ma->fsx_xflags ^ fa->fsx_xflags) &
		    FS_XFLAG_PROJINHERIT)
			return -EINVAL;
	} else {
		/*
		 * Caller is allowed to change the project ID. If it is being
		 * changed, make sure that the new value is valid.
		 */
		if (old_ma->fsx_projid != fa->fsx_projid &&
		    !projid_valid(make_kprojid(&init_user_ns, fa->fsx_projid)))
			return -EINVAL;
	}

	/* Check extent size hints. */
	if ((fa->fsx_xflags & FS_XFLAG_EXTSIZE) && !S_ISREG(inode->i_mode))
		return -EINVAL;

	if ((fa->fsx_xflags & FS_XFLAG_EXTSZINHERIT) &&
	    !S_ISDIR(inode->i_mode))
		return -EINVAL;

	if ((fa->fsx_xflags & FS_XFLAG_COWEXTSIZE) &&
	    !S_ISREG(inode->i_mode) && !S_ISDIR(inode->i_mode))
		return -EINVAL;

	/*
	 * It is only valid to set the DAX flag on regular files and
	 * directories on filesystems.
	 */
	if ((fa->fsx_xflags & FS_XFLAG_DAX) &&
	    !(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode)))
		return -EINVAL;

	/* Extent size hints of zero turn off the flags. */
	if (fa->fsx_extsize == 0)
		fa->fsx_xflags &= ~(FS_XFLAG_EXTSIZE | FS_XFLAG_EXTSZINHERIT);
	if (fa->fsx_cowextsize == 0)
		fa->fsx_xflags &= ~FS_XFLAG_COWEXTSIZE;

	return 0;
}
/**
 * vfs_fileattr_set - change miscellaneous file attributes
 * @idmap: idmap of the mount
 * @dentry: the object to change
 * @fa: fileattr pointer
 *
 * After verifying permissions, call i_op->fileattr_set() callback, if
 * exists.
 *
 * Verifying attributes involves retrieving current attributes with
 * i_op->fileattr_get(), this also allows initializing attributes that have
 * not been set by the caller to current values. Inode lock is held
 * throughout to prevent racing with another instance.
 *
 * Return: 0 on success, or a negative error on failure.
 */
int vfs_fileattr_set(struct mnt_idmap *idmap, struct dentry *dentry,
		     struct file_kattr *fa)
{
	struct inode *inode = d_inode(dentry);
	struct file_kattr old_ma = {};
	int err;

	if (!inode->i_op->fileattr_set)
		return -ENOIOCTLCMD;

	if (!inode_owner_or_capable(idmap, inode))
		return -EPERM;

	inode_lock(inode);
	err = vfs_fileattr_get(dentry, &old_ma);
	if (!err) {
		/* initialize missing bits from old_ma */
		if (fa->flags_valid) {
			/* Caller supplied FS_*_FL flags; keep the xflag-only
			 * state (and fsx fields) from the current attrs. */
			fa->fsx_xflags |= old_ma.fsx_xflags & ~FS_XFLAG_COMMON;
			fa->fsx_extsize = old_ma.fsx_extsize;
			fa->fsx_nextents = old_ma.fsx_nextents;
			fa->fsx_projid = old_ma.fsx_projid;
			fa->fsx_cowextsize = old_ma.fsx_cowextsize;
		} else {
			/* Caller supplied xflags; keep flag-only state. */
			fa->flags |= old_ma.flags & ~FS_COMMON_FL;
		}

		err = fileattr_set_prepare(inode, &old_ma, fa);
		if (err)
			goto out;
		err = security_inode_file_setattr(dentry, fa);
		if (err)
			goto out;
		err = inode->i_op->fileattr_set(idmap, dentry, fa);
		if (err)
			goto out;
		/* Attributes changed; notify fsnotify watchers. */
		fsnotify_xattr(dentry);
	}

out:
	inode_unlock(inode);
	return err;
}
EXPORT_SYMBOL(vfs_fileattr_set);
/* FS_IOC_GETFLAGS: return the inode's FS_*_FL flags to userspace. */
int ioctl_getflags(struct file *file, unsigned int __user *argp)
{
	struct file_kattr fa = { .flags_valid = true }; /* hint only */
	int err;

	err = vfs_fileattr_get(file->f_path.dentry, &fa);
	if (err)
		return err;

	return put_user(fa.flags, argp);
}
/* FS_IOC_SETFLAGS: apply FS_*_FL flags supplied by userspace. */
int ioctl_setflags(struct file *file, unsigned int __user *argp)
{
	struct file_kattr fa;
	unsigned int flags;
	int err;

	err = get_user(flags, argp);
	if (err)
		return err;

	err = mnt_want_write_file(file);
	if (err)
		return err;

	fileattr_fill_flags(&fa, flags);
	err = vfs_fileattr_set(file_mnt_idmap(file), file->f_path.dentry, &fa);
	mnt_drop_write_file(file);

	return err;
}
/* FS_IOC_FSGETXATTR: return the inode's fsxattr info to userspace. */
int ioctl_fsgetxattr(struct file *file, void __user *argp)
{
	struct file_kattr fa = { .fsx_valid = true }; /* hint only */
	int err;

	err = vfs_fileattr_get(file->f_path.dentry, &fa);
	if (err)
		return err;

	return copy_fsxattr_to_user(&fa, argp);
}
/* FS_IOC_FSSETXATTR: apply fsxattr settings supplied by userspace. */
int ioctl_fssetxattr(struct file *file, void __user *argp)
{
	struct file_kattr fa;
	int err;

	err = copy_fsxattr_from_user(&fa, argp);
	if (err)
		return err;

	err = mnt_want_write_file(file);
	if (err)
		return err;

	err = vfs_fileattr_set(file_mnt_idmap(file), file->f_path.dentry, &fa);
	mnt_drop_write_file(file);

	return err;
}
/* file_getattr(2): fetch struct file_attr for a path or (with a NULL
 * filename / AT_EMPTY_PATH) for the file referred to by dfd.  usize allows
 * the struct to grow in future ABI versions (copy_struct_to_user). */
SYSCALL_DEFINE5(file_getattr, int, dfd, const char __user *, filename,
		struct file_attr __user *, ufattr, size_t, usize,
		unsigned int, at_flags)
{
	/* __free(path_put) drops the path reference automatically on return. */
	struct path filepath __free(path_put) = {};
	unsigned int lookup_flags = 0;
	struct file_attr fattr;
	struct file_kattr fa;
	int error;

	/* Compile-time ABI sanity: the struct may only grow, never shrink. */
	BUILD_BUG_ON(sizeof(struct file_attr) < FILE_ATTR_SIZE_VER0);
	BUILD_BUG_ON(sizeof(struct file_attr) != FILE_ATTR_SIZE_LATEST);

	if ((at_flags & ~(AT_SYMLINK_NOFOLLOW | AT_EMPTY_PATH)) != 0)
		return -EINVAL;

	if (!(at_flags & AT_SYMLINK_NOFOLLOW))
		lookup_flags |= LOOKUP_FOLLOW;

	if (usize > PAGE_SIZE)
		return -E2BIG;

	if (usize < FILE_ATTR_SIZE_VER0)
		return -EINVAL;

	CLASS(filename_maybe_null, name)(filename, at_flags);
	if (!name && dfd >= 0) {
		/* NULL filename: operate on the already-open fd itself. */
		CLASS(fd, f)(dfd);
		if (fd_empty(f))
			return -EBADF;

		filepath = fd_file(f)->f_path;
		path_get(&filepath);
	} else {
		error = filename_lookup(dfd, name, lookup_flags, &filepath,
					NULL);
		if (error)
			return error;
	}

	error = vfs_fileattr_get(filepath.dentry, &fa);
	/* Internal "unsupported" codes must not leak to the syscall ABI. */
	if (error == -ENOIOCTLCMD || error == -ENOTTY)
		error = -EOPNOTSUPP;
	if (error)
		return error;

	fileattr_to_file_attr(&fa, &fattr);
	error = copy_struct_to_user(ufattr, usize, &fattr,
				    sizeof(struct file_attr), NULL);

	return error;
}
/* file_setattr(2): apply struct file_attr to a path or (with a NULL
 * filename / AT_EMPTY_PATH) to the file referred to by dfd.  Mirrors
 * file_getattr() above; see that function for the ABI-size rationale. */
SYSCALL_DEFINE5(file_setattr, int, dfd, const char __user *, filename,
		struct file_attr __user *, ufattr, size_t, usize,
		unsigned int, at_flags)
{
	/* __free(path_put) drops the path reference automatically on return. */
	struct path filepath __free(path_put) = {};
	unsigned int lookup_flags = 0;
	struct file_attr fattr;
	struct file_kattr fa;
	int error;

	/* Compile-time ABI sanity: the struct may only grow, never shrink. */
	BUILD_BUG_ON(sizeof(struct file_attr) < FILE_ATTR_SIZE_VER0);
	BUILD_BUG_ON(sizeof(struct file_attr) != FILE_ATTR_SIZE_LATEST);

	if ((at_flags & ~(AT_SYMLINK_NOFOLLOW | AT_EMPTY_PATH)) != 0)
		return -EINVAL;

	if (!(at_flags & AT_SYMLINK_NOFOLLOW))
		lookup_flags |= LOOKUP_FOLLOW;

	if (usize > PAGE_SIZE)
		return -E2BIG;

	if (usize < FILE_ATTR_SIZE_VER0)
		return -EINVAL;

	/* copy_struct_from_user() zero-fills fields a smaller usize omits. */
	error = copy_struct_from_user(&fattr, sizeof(struct file_attr), ufattr,
				      usize);
	if (error)
		return error;

	error = file_attr_to_fileattr(&fattr, &fa);
	if (error)
		return error;

	CLASS(filename_maybe_null, name)(filename, at_flags);
	if (!name && dfd >= 0) {
		/* NULL filename: operate on the already-open fd itself. */
		CLASS(fd, f)(dfd);
		if (fd_empty(f))
			return -EBADF;

		filepath = fd_file(f)->f_path;
		path_get(&filepath);
	} else {
		error = filename_lookup(dfd, name, lookup_flags, &filepath,
					NULL);
		if (error)
			return error;
	}

	error = mnt_want_write(filepath.mnt);
	if (!error) {
		error = vfs_fileattr_set(mnt_idmap(filepath.mnt),
					 filepath.dentry, &fa);
		/* Internal "unsupported" codes must not leak to the ABI. */
		if (error == -ENOIOCTLCMD || error == -ENOTTY)
			error = -EOPNOTSUPP;
		mnt_drop_write(filepath.mnt);
	}

	return error;
}
|
c
|
github
|
https://github.com/torvalds/linux
|
fs/file_attr.c
|
"""
Tests for Bookmarks models.
"""
from contextlib import contextmanager
import datetime
import ddt
from freezegun import freeze_time
import mock
from nose.plugins.attrib import attr
import pytz
from opaque_keys.edx.keys import UsageKey
from opaque_keys.edx.locator import CourseLocator, BlockUsageLocator
from xmodule.modulestore import ModuleStoreEnum
from xmodule.modulestore.django import modulestore
from xmodule.modulestore.tests.factories import check_mongo_calls, CourseFactory, ItemFactory
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
from openedx.core.djangolib.testing.utils import skip_unless_lms
from student.tests.factories import AdminFactory, UserFactory
from .. import DEFAULT_FIELDS, OPTIONAL_FIELDS, PathItem
from ..models import Bookmark, XBlockCache, parse_path_data
from .factories import BookmarkFactory
# Example usage keys for blocks in an old-style ("i4x") mongo course.
EXAMPLE_USAGE_KEY_1 = u'i4x://org.15/course_15/chapter/Week_1'
EXAMPLE_USAGE_KEY_2 = u'i4x://org.15/course_15/chapter/Week_2'
# Context manager that does nothing; stands in where a store-specific
# context manager is optional.
noop_contextmanager = contextmanager(lambda x: (yield))  # pylint: disable=invalid-name
class BookmarksTestsBase(ModuleStoreTestCase):
    """
    Test the Bookmark model.

    Base class providing two courses (self.course and self.other_course)
    with a small block tree each, plus pre-created bookmarks.
    """
    ALL_FIELDS = DEFAULT_FIELDS + OPTIONAL_FIELDS
    STORE_TYPE = ModuleStoreEnum.Type.mongo
    TEST_PASSWORD = 'test'
    ENABLED_CACHES = ['default', 'mongo_metadata_inheritance', 'loc_cache']

    def setUp(self):
        super(BookmarksTestsBase, self).setUp()

        self.admin = AdminFactory()
        self.user = UserFactory.create(password=self.TEST_PASSWORD)
        self.other_user = UserFactory.create(password=self.TEST_PASSWORD)
        self.setup_data(self.STORE_TYPE)

    def setup_data(self, store_type=ModuleStoreEnum.Type.mongo):
        """ Create courses and add some test blocks. """

        with self.store.default_store(store_type):

            self.course = CourseFactory.create(display_name='An Introduction to API Testing')
            self.course_id = unicode(self.course.id)

            with self.store.bulk_operations(self.course.id):
                # Block tree: course > chapters > sequentials > verticals > html.
                self.chapter_1 = ItemFactory.create(
                    parent_location=self.course.location, category='chapter', display_name='Week 1'
                )
                self.chapter_2 = ItemFactory.create(
                    parent_location=self.course.location, category='chapter', display_name='Week 2'
                )
                self.sequential_1 = ItemFactory.create(
                    parent_location=self.chapter_1.location, category='sequential', display_name='Lesson 1'
                )
                self.sequential_2 = ItemFactory.create(
                    parent_location=self.chapter_1.location, category='sequential', display_name='Lesson 2'
                )
                self.vertical_1 = ItemFactory.create(
                    parent_location=self.sequential_1.location, category='vertical', display_name='Subsection 1'
                )
                self.vertical_2 = ItemFactory.create(
                    parent_location=self.sequential_2.location, category='vertical', display_name='Subsection 2'
                )
                self.vertical_3 = ItemFactory.create(
                    parent_location=self.sequential_2.location, category='vertical', display_name='Subsection 3'
                )
                self.html_1 = ItemFactory.create(
                    parent_location=self.vertical_2.location, category='html', display_name='Details 1'
                )

            # Expected breadcrumb path for blocks under sequential_2.
            self.path = [
                PathItem(self.chapter_1.location, self.chapter_1.display_name),
                PathItem(self.sequential_2.location, self.sequential_2.display_name),
            ]

            self.bookmark_1 = BookmarkFactory.create(
                user=self.user,
                course_key=self.course_id,
                usage_key=self.sequential_1.location,
                xblock_cache=XBlockCache.create({
                    'display_name': self.sequential_1.display_name,
                    'usage_key': self.sequential_1.location,
                }),
            )
            self.bookmark_2 = BookmarkFactory.create(
                user=self.user,
                course_key=self.course_id,
                usage_key=self.sequential_2.location,
                xblock_cache=XBlockCache.create({
                    'display_name': self.sequential_2.display_name,
                    'usage_key': self.sequential_2.location,
                }),
            )

            self.other_course = CourseFactory.create(display_name='An Introduction to API Testing 2')

            with self.store.bulk_operations(self.other_course.id):
                self.other_chapter_1 = ItemFactory.create(
                    parent_location=self.other_course.location, category='chapter', display_name='Other Week 1'
                )
                self.other_sequential_1 = ItemFactory.create(
                    parent_location=self.other_chapter_1.location, category='sequential', display_name='Other Lesson 1'
                )
                self.other_sequential_2 = ItemFactory.create(
                    parent_location=self.other_chapter_1.location, category='sequential', display_name='Other Lesson 2'
                )
                self.other_vertical_1 = ItemFactory.create(
                    parent_location=self.other_sequential_1.location, category='vertical', display_name='Other Subsection 1'
                )
                self.other_vertical_2 = ItemFactory.create(
                    parent_location=self.other_sequential_1.location, category='vertical', display_name='Other Subsection 2'
                )

                # self.other_vertical_1 has two parents
                self.other_sequential_2.children.append(self.other_vertical_1.location)
                modulestore().update_item(self.other_sequential_2, self.admin.id)  # pylint: disable=no-member

            self.other_bookmark_1 = BookmarkFactory.create(
                user=self.user,
                course_key=unicode(self.other_course.id),
                usage_key=self.other_vertical_1.location,
                xblock_cache=XBlockCache.create({
                    'display_name': self.other_vertical_1.display_name,
                    'usage_key': self.other_vertical_1.location,
                }),
            )

    def create_course_with_blocks(self, children_per_block=1, depth=1, store_type=ModuleStoreEnum.Type.mongo):
        """
        Create a course and add blocks.

        Builds a uniform tree: each block at each level gets
        children_per_block children, repeated `depth` times.
        """
        with self.store.default_store(store_type):
            course = CourseFactory.create()
            display_name = 0

            with self.store.bulk_operations(course.id):
                blocks_at_next_level = [course]

                for __ in range(depth):
                    blocks_at_current_level = blocks_at_next_level
                    blocks_at_next_level = []

                    for block in blocks_at_current_level:
                        for __ in range(children_per_block):
                            blocks_at_next_level += [ItemFactory.create(
                                parent_location=block.scope_ids.usage_id, display_name=unicode(display_name)
                            )]
                            display_name += 1

        return course

    def create_course_with_bookmarks_count(self, count, store_type=ModuleStoreEnum.Type.mongo):
        """
        Create a course, add some content and add bookmarks.

        Returns (course, blocks, bookmarks) with one chapter + bookmark
        per requested count, all owned by self.user.
        """
        with self.store.default_store(store_type):
            course = CourseFactory.create()

            with self.store.bulk_operations(course.id):
                blocks = [ItemFactory.create(
                    parent_location=course.location, category='chapter', display_name=unicode(index)
                ) for index in range(count)]

            bookmarks = [BookmarkFactory.create(
                user=self.user,
                course_key=course.id,
                usage_key=block.location,
                xblock_cache=XBlockCache.create({
                    'display_name': block.display_name,
                    'usage_key': block.location,
                }),
            ) for block in blocks]

        return course, blocks, bookmarks

    def assert_bookmark_model_is_valid(self, bookmark, bookmark_data):
        """
        Assert that the attributes of the bookmark model were set correctly.
        """
        self.assertEqual(bookmark.user, bookmark_data['user'])
        self.assertEqual(bookmark.course_key, bookmark_data['course_key'])
        self.assertEqual(unicode(bookmark.usage_key), unicode(bookmark_data['usage_key']))
        # resource_id is the "<user>,<usage_key>" composite identifier.
        self.assertEqual(bookmark.resource_id, u"{},{}".format(bookmark_data['user'], bookmark_data['usage_key']))
        self.assertEqual(bookmark.display_name, bookmark_data['display_name'])
        self.assertEqual(bookmark.path, self.path)
        self.assertIsNotNone(bookmark.created)

        self.assertEqual(bookmark.xblock_cache.course_key, bookmark_data['course_key'])
        self.assertEqual(bookmark.xblock_cache.display_name, bookmark_data['display_name'])

    def assert_bookmark_data_is_valid(self, bookmark, bookmark_data, check_optional_fields=False):
        """
        Assert that the bookmark data matches the data in the model.
        """
        self.assertEqual(bookmark_data['id'], bookmark.resource_id)
        self.assertEqual(bookmark_data['course_id'], unicode(bookmark.course_key))
        self.assertEqual(bookmark_data['usage_id'], unicode(bookmark.usage_key))
        self.assertEqual(bookmark_data['block_type'], unicode(bookmark.usage_key.block_type))
        self.assertIsNotNone(bookmark_data['created'])

        if check_optional_fields:
            self.assertEqual(bookmark_data['display_name'], bookmark.display_name)
            self.assertEqual(bookmark_data['path'], bookmark.path)
@attr(shard=2)
@ddt.ddt
@skip_unless_lms
class BookmarkModelTests(BookmarksTestsBase):
    """
    Test the Bookmark model.
    """
    def setUp(self):
        super(BookmarkModelTests, self).setUp()

        # Vertical with display_name=None, to exercise the default-name path.
        self.vertical_4 = ItemFactory.create(
            parent_location=self.sequential_2.location,
            category='vertical',
            display_name=None
        )

    def get_bookmark_data(self, block, user=None):
        """
        Returns bookmark data for testing.
        """
        return {
            'user': user or self.user,
            'usage_key': block.location,
            'course_key': block.location.course_key,
            'display_name': block.display_name,
        }

    @ddt.data(
        (ModuleStoreEnum.Type.mongo, 'course', [], 3),
        (ModuleStoreEnum.Type.mongo, 'chapter_1', [], 3),
        (ModuleStoreEnum.Type.mongo, 'sequential_1', ['chapter_1'], 4),
        (ModuleStoreEnum.Type.mongo, 'vertical_1', ['chapter_1', 'sequential_1'], 6),
        (ModuleStoreEnum.Type.mongo, 'html_1', ['chapter_1', 'sequential_2', 'vertical_2'], 7),
        (ModuleStoreEnum.Type.split, 'course', [], 3),
        (ModuleStoreEnum.Type.split, 'chapter_1', [], 2),
        (ModuleStoreEnum.Type.split, 'sequential_1', ['chapter_1'], 2),
        (ModuleStoreEnum.Type.split, 'vertical_1', ['chapter_1', 'sequential_1'], 2),
        (ModuleStoreEnum.Type.split, 'html_1', ['chapter_1', 'sequential_2', 'vertical_2'], 2),
    )
    @ddt.unpack
    def test_path_and_queries_on_create(self, store_type, block_to_bookmark, ancestors_attrs, expected_mongo_calls):
        """
        In case of mongo, 1 query is used to fetch the block, and 2
        by path_to_location(), and then 1 query per parent in path
        is needed to fetch the parent blocks.
        """
        self.setup_data(store_type)
        user = UserFactory.create()

        expected_path = [PathItem(
            usage_key=getattr(self, ancestor_attr).location, display_name=getattr(self, ancestor_attr).display_name
        ) for ancestor_attr in ancestors_attrs]

        bookmark_data = self.get_bookmark_data(getattr(self, block_to_bookmark), user=user)

        with check_mongo_calls(expected_mongo_calls):
            bookmark, __ = Bookmark.create(bookmark_data)

        self.assertEqual(bookmark.path, expected_path)
        self.assertIsNotNone(bookmark.xblock_cache)
        self.assertEqual(bookmark.xblock_cache.paths, [])

    def test_create_bookmark_success(self):
        """
        Tests creation of bookmark.
        """
        bookmark_data = self.get_bookmark_data(self.vertical_2)
        bookmark, __ = Bookmark.create(bookmark_data)
        self.assert_bookmark_model_is_valid(bookmark, bookmark_data)

        # Creating again with different data must return the existing record.
        bookmark_data_different_values = self.get_bookmark_data(self.vertical_2)
        bookmark_data_different_values['display_name'] = 'Introduction Video'
        bookmark2, __ = Bookmark.create(bookmark_data_different_values)
        # The bookmark object already created should have been returned without modifications.
        self.assertEqual(bookmark, bookmark2)
        self.assertEqual(bookmark.xblock_cache, bookmark2.xblock_cache)
        self.assert_bookmark_model_is_valid(bookmark2, bookmark_data)

        # A different user bookmarking the same block gets a new record.
        bookmark_data_different_user = self.get_bookmark_data(self.vertical_2)
        bookmark_data_different_user['user'] = UserFactory.create()
        bookmark3, __ = Bookmark.create(bookmark_data_different_user)
        self.assertNotEqual(bookmark, bookmark3)
        self.assert_bookmark_model_is_valid(bookmark3, bookmark_data_different_user)

    def test_create_bookmark_successfully_with_display_name_none(self):
        """
        Tests creation of bookmark with display_name None.
        """
        bookmark_data = self.get_bookmark_data(self.vertical_4)
        bookmark, __ = Bookmark.create(bookmark_data)
        # The model should fall back to the block's default display name.
        bookmark_data['display_name'] = self.vertical_4.display_name_with_default
        self.assert_bookmark_model_is_valid(bookmark, bookmark_data)

    @ddt.data(
        (-30, [[PathItem(EXAMPLE_USAGE_KEY_1, '1')]], 1),
        (30, None, 2),
        (30, [], 2),
        (30, [[PathItem(EXAMPLE_USAGE_KEY_1, '1')]], 1),
        (30, [[PathItem(EXAMPLE_USAGE_KEY_1, '1')], [PathItem(EXAMPLE_USAGE_KEY_2, '2')]], 2),
    )
    @ddt.unpack
    @mock.patch('openedx.core.djangoapps.bookmarks.models.Bookmark.get_path')
    def test_path(self, seconds_delta, paths, get_path_call_count, mock_get_path):
        # seconds_delta controls whether the cache looks fresher than the
        # bookmark; get_path is only re-called when the cached paths are stale
        # or ambiguous.
        block_path = [PathItem(UsageKey.from_string(EXAMPLE_USAGE_KEY_1), '1')]
        mock_get_path.return_value = block_path

        html = ItemFactory.create(
            parent_location=self.other_chapter_1.location, category='html', display_name='Other Lesson 1'
        )

        bookmark_data = self.get_bookmark_data(html)
        bookmark, __ = Bookmark.create(bookmark_data)
        self.assertIsNotNone(bookmark.xblock_cache)

        modification_datetime = datetime.datetime.now(pytz.utc) + datetime.timedelta(seconds=seconds_delta)
        with freeze_time(modification_datetime):
            bookmark.xblock_cache.paths = paths
            bookmark.xblock_cache.save()

        self.assertEqual(bookmark.path, block_path)
        self.assertEqual(mock_get_path.call_count, get_path_call_count)

    @ddt.data(
        (ModuleStoreEnum.Type.mongo, 2, 2, 2),
        (ModuleStoreEnum.Type.mongo, 4, 2, 2),
        (ModuleStoreEnum.Type.mongo, 6, 2, 2),
        (ModuleStoreEnum.Type.mongo, 2, 3, 3),
        (ModuleStoreEnum.Type.mongo, 4, 3, 3),
        # (ModuleStoreEnum.Type.mongo, 6, 3, 3), Too slow.
        (ModuleStoreEnum.Type.mongo, 2, 4, 4),
        # (ModuleStoreEnum.Type.mongo, 4, 4, 4),
        (ModuleStoreEnum.Type.split, 2, 2, 2),
        (ModuleStoreEnum.Type.split, 4, 2, 2),
        (ModuleStoreEnum.Type.split, 2, 3, 2),
        # (ModuleStoreEnum.Type.split, 4, 3, 2),
        (ModuleStoreEnum.Type.split, 2, 4, 2),
    )
    @ddt.unpack
    def test_get_path_queries(self, store_type, children_per_block, depth, expected_mongo_calls):
        """
        In case of mongo, 2 queries are used by path_to_location(), and then
        1 query per parent in path is needed to fetch the parent blocks.
        """
        course = self.create_course_with_blocks(children_per_block, depth, store_type)

        # Find a leaf block.
        block = modulestore().get_course(course.id, depth=None)
        for __ in range(depth - 1):
            children = block.get_children()
            block = children[-1]

        with check_mongo_calls(expected_mongo_calls):
            path = Bookmark.get_path(block.location)
            # Path excludes the course block and the leaf block itself.
            self.assertEqual(len(path), depth - 2)

    def test_get_path_in_case_of_exceptions(self):

        user = UserFactory.create()

        # Block does not exist
        usage_key = UsageKey.from_string('i4x://edX/apis/html/interactive')
        usage_key.replace(course_key=self.course.id)
        self.assertEqual(Bookmark.get_path(usage_key), [])

        # Block is an orphan
        self.other_sequential_1.children = []
        modulestore().update_item(self.other_sequential_1, self.admin.id)  # pylint: disable=no-member

        bookmark_data = self.get_bookmark_data(self.other_vertical_2, user=user)
        bookmark, __ = Bookmark.create(bookmark_data)

        self.assertEqual(bookmark.path, [])
        self.assertIsNotNone(bookmark.xblock_cache)
        self.assertEqual(bookmark.xblock_cache.paths, [])

        # Parent block could not be retrieved
        with mock.patch('openedx.core.djangoapps.bookmarks.models.search.path_to_location') as mock_path_to_location:
            mock_path_to_location.return_value = [usage_key]
            bookmark_data = self.get_bookmark_data(self.other_sequential_1, user=user)
            bookmark, __ = Bookmark.create(bookmark_data)
            self.assertEqual(bookmark.path, [])
@attr(shard=2)
@ddt.ddt
class XBlockCacheModelTest(ModuleStoreTestCase):
    """
    Test the XBlockCache model.
    """
    COURSE_KEY = CourseLocator(org='test', course='test', run='test')
    CHAPTER1_USAGE_KEY = BlockUsageLocator(COURSE_KEY, block_type='chapter', block_id='chapter1')
    SECTION1_USAGE_KEY = BlockUsageLocator(COURSE_KEY, block_type='section', block_id='section1')
    # NOTE(review): SECTION2 reuses block_id 'section1' — presumably
    # intentional for these fixtures, but looks like a copy/paste slip.
    SECTION2_USAGE_KEY = BlockUsageLocator(COURSE_KEY, block_type='section', block_id='section1')
    VERTICAL1_USAGE_KEY = BlockUsageLocator(COURSE_KEY, block_type='vertical', block_id='sequential1')
    # Paths in the serialized "_paths" form: lists of [usage_key, name] pairs.
    PATH1 = [
        [unicode(CHAPTER1_USAGE_KEY), 'Chapter 1'],
        [unicode(SECTION1_USAGE_KEY), 'Section 1'],
    ]
    PATH2 = [
        [unicode(CHAPTER1_USAGE_KEY), 'Chapter 1'],
        [unicode(SECTION2_USAGE_KEY), 'Section 2'],
    ]

    def assert_xblock_cache_data(self, xblock_cache, data):
        """
        Assert that the XBlockCache object values match.
        """
        self.assertEqual(xblock_cache.usage_key, data['usage_key'])
        self.assertEqual(xblock_cache.course_key, data['usage_key'].course_key)
        self.assertEqual(xblock_cache.display_name, data['display_name'])
        self.assertEqual(xblock_cache._paths, data['_paths'])  # pylint: disable=protected-access
        self.assertEqual(xblock_cache.paths, [parse_path_data(path) for path in data['_paths']])

    @ddt.data(
        (
            [
                {'usage_key': VERTICAL1_USAGE_KEY, },
                {'display_name': '', '_paths': [], },
            ],
            [
                {'usage_key': VERTICAL1_USAGE_KEY, 'display_name': 'Vertical 5', '_paths': [PATH2]},
                {'_paths': []},
            ],
        ),
        (
            [
                {'usage_key': VERTICAL1_USAGE_KEY, 'display_name': 'Vertical 4', '_paths': [PATH1]},
                {},
            ],
            [
                {'usage_key': VERTICAL1_USAGE_KEY, 'display_name': 'Vertical 5', '_paths': [PATH2]},
                {'_paths': [PATH1]},
            ],
        ),
    )
    def test_create(self, data):
        """
        Test XBlockCache.create() constructs and updates objects correctly.
        """
        # Each case is (create_data, extra fields expected on the result);
        # the second iteration exercises the update-existing-row path.
        for create_data, additional_data_to_expect in data:
            xblock_cache = XBlockCache.create(create_data)
            create_data.update(additional_data_to_expect)
            self.assert_xblock_cache_data(xblock_cache, create_data)

    @ddt.data(
        ([], [PATH1]),
        ([PATH1, PATH2], [PATH1]),
        ([PATH1], []),
    )
    @ddt.unpack
    def test_paths(self, original_paths, updated_paths):
        # The paths property must round-trip through the serialized _paths
        # column on save/reload.
        xblock_cache = XBlockCache.create({
            'usage_key': self.VERTICAL1_USAGE_KEY,
            'display_name': 'The end.',
            '_paths': original_paths,
        })
        self.assertEqual(xblock_cache.paths, [parse_path_data(path) for path in original_paths])

        xblock_cache.paths = [parse_path_data(path) for path in updated_paths]
        xblock_cache.save()

        xblock_cache = XBlockCache.objects.get(id=xblock_cache.id)
        self.assertEqual(xblock_cache._paths, updated_paths)  # pylint: disable=protected-access
        self.assertEqual(xblock_cache.paths, [parse_path_data(path) for path in updated_paths])
|
unknown
|
codeparrot/codeparrot-clean
| ||
from __future__ import division, absolute_import, print_function
import sys
import time
from datetime import date
import numpy as np
from numpy.compat import asbytes, asbytes_nested
from numpy.testing import (
run_module_suite, TestCase, assert_, assert_equal, assert_allclose,
assert_raises
)
from numpy.lib._iotools import (
LineSplitter, NameValidator, StringConverter,
has_nested_fields, easy_dtype, flatten_dtype
)
class TestLineSplitter(TestCase):
    "Tests the LineSplitter class."
    def test_no_delimiter(self):
        "Test LineSplitter w/o delimiter"
        # Default and empty delimiters both fall back to whitespace
        # splitting; the trailing '# test' comment is stripped first.
        strg = asbytes(" 1 2 3 4 5 # test")
        test = LineSplitter()(strg)
        assert_equal(test, asbytes_nested(['1', '2', '3', '4', '5']))
        test = LineSplitter('')(strg)
        assert_equal(test, asbytes_nested(['1', '2', '3', '4', '5']))
    def test_space_delimiter(self):
        "Test space delimiter"
        # An explicit space delimiter preserves empty fields created by
        # consecutive delimiters, unlike whitespace splitting.
        strg = asbytes(" 1 2 3 4 5 # test")
        test = LineSplitter(asbytes(' '))(strg)
        assert_equal(test, asbytes_nested(['1', '2', '3', '4', '', '5']))
        test = LineSplitter(asbytes(' '))(strg)
        assert_equal(test, asbytes_nested(['1 2 3 4', '5']))
    def test_tab_delimiter(self):
        "Test tab delimiter"
        strg = asbytes(" 1\t 2\t 3\t 4\t 5 6")
        test = LineSplitter(asbytes('\t'))(strg)
        assert_equal(test, asbytes_nested(['1', '2', '3', '4', '5 6']))
        strg = asbytes(" 1 2\t 3 4\t 5 6")
        test = LineSplitter(asbytes('\t'))(strg)
        assert_equal(test, asbytes_nested(['1 2', '3 4', '5 6']))
    def test_other_delimiter(self):
        "Test LineSplitter on delimiter"
        strg = asbytes("1,2,3,4,,5")
        test = LineSplitter(asbytes(','))(strg)
        assert_equal(test, asbytes_nested(['1', '2', '3', '4', '', '5']))
        #
        strg = asbytes(" 1,2,3,4,,5 # test")
        test = LineSplitter(asbytes(','))(strg)
        assert_equal(test, asbytes_nested(['1', '2', '3', '4', '', '5']))
    def test_constant_fixed_width(self):
        "Test LineSplitter w/ fixed-width fields"
        # An integer delimiter means fixed-width fields of that many chars.
        strg = asbytes(" 1 2 3 4 5 # test")
        test = LineSplitter(3)(strg)
        assert_equal(test, asbytes_nested(['1', '2', '3', '4', '', '5', '']))
        #
        strg = asbytes(" 1 3 4 5 6# test")
        test = LineSplitter(20)(strg)
        assert_equal(test, asbytes_nested(['1 3 4 5 6']))
        #
        strg = asbytes(" 1 3 4 5 6# test")
        test = LineSplitter(30)(strg)
        assert_equal(test, asbytes_nested(['1 3 4 5 6']))
    def test_variable_fixed_width(self):
        # A tuple of integers gives per-field widths.
        strg = asbytes(" 1 3 4 5 6# test")
        test = LineSplitter((3, 6, 6, 3))(strg)
        assert_equal(test, asbytes_nested(['1', '3', '4 5', '6']))
        #
        strg = asbytes(" 1 3 4 5 6# test")
        test = LineSplitter((6, 6, 9))(strg)
        assert_equal(test, asbytes_nested(['1', '3 4', '5 6']))
# -----------------------------------------------------------------------------
class TestNameValidator(TestCase):
    "Tests the NameValidator class."
    def test_case_sensitivity(self):
        "Test case sensitivity"
        names = ['A', 'a', 'b', 'c']
        test = NameValidator().validate(names)
        assert_equal(test, ['A', 'a', 'b', 'c'])
        # With case folding, names that collide get a numeric suffix
        # ('A' then 'A_1').  case_sensitive=False behaves like 'upper'.
        test = NameValidator(case_sensitive=False).validate(names)
        assert_equal(test, ['A', 'A_1', 'B', 'C'])
        test = NameValidator(case_sensitive='upper').validate(names)
        assert_equal(test, ['A', 'A_1', 'B', 'C'])
        test = NameValidator(case_sensitive='lower').validate(names)
        assert_equal(test, ['a', 'a_1', 'b', 'c'])
        # check exceptions
        assert_raises(ValueError, NameValidator, case_sensitive='foobar')
    def test_excludelist(self):
        "Test excludelist"
        # Names on the excludelist get a trailing underscore appended;
        # spaces are replaced in all names.
        names = ['dates', 'data', 'Other Data', 'mask']
        validator = NameValidator(excludelist=['dates', 'data', 'mask'])
        test = validator.validate(names)
        assert_equal(test, ['dates_', 'data_', 'Other_Data', 'mask_'])
    def test_missing_names(self):
        "Test validate missing names"
        # Empty names get 'f%i' defaults, skipping numbers already taken.
        namelist = ('a', 'b', 'c')
        validator = NameValidator()
        assert_equal(validator(namelist), ['a', 'b', 'c'])
        namelist = ('', 'b', 'c')
        assert_equal(validator(namelist), ['f0', 'b', 'c'])
        namelist = ('a', 'b', '')
        assert_equal(validator(namelist), ['a', 'b', 'f0'])
        namelist = ('', 'f0', '')
        assert_equal(validator(namelist), ['f1', 'f0', 'f2'])
    def test_validate_nb_names(self):
        "Test validate nb names"
        # nbfields truncates or pads the name list to the requested length,
        # using defaultfmt to generate the padding names.
        namelist = ('a', 'b', 'c')
        validator = NameValidator()
        assert_equal(validator(namelist, nbfields=1), ('a',))
        assert_equal(validator(namelist, nbfields=5, defaultfmt="g%i"),
                     ['a', 'b', 'c', 'g0', 'g1'])
    def test_validate_wo_names(self):
        "Test validate no names"
        # None passes through unless nbfields forces generated names.
        namelist = None
        validator = NameValidator()
        assert_(validator(namelist) is None)
        assert_equal(validator(namelist, nbfields=3), ['f0', 'f1', 'f2'])
# -----------------------------------------------------------------------------
def _bytes_to_date(s):
if sys.version_info[0] >= 3:
return date(*time.strptime(s.decode('latin1'), "%Y-%m-%d")[:3])
else:
return date(*time.strptime(s, "%Y-%m-%d")[:3])
class TestStringConverter(TestCase):
    "Test StringConverter"
    def test_creation(self):
        "Test creation of a StringConverter"
        converter = StringConverter(int, -99999)
        # _status indexes the converter's position in the upgrade chain.
        assert_equal(converter._status, 1)
        assert_equal(converter.default, -99999)
    def test_upgrade(self):
        "Tests the upgrade method."
        converter = StringConverter()
        assert_equal(converter._status, 0)
        # test int
        assert_equal(converter.upgrade(asbytes('0')), 0)
        assert_equal(converter._status, 1)
        # On systems where integer defaults to 32-bit, the statuses will be
        # offset by one, so we check for this here.
        import numpy.core.numeric as nx
        status_offset = int(nx.dtype(nx.integer).itemsize < nx.dtype(nx.int64).itemsize)
        # test int > 2**32
        assert_equal(converter.upgrade(asbytes('17179869184')), 17179869184)
        assert_equal(converter._status, 1 + status_offset)
        # test float
        assert_allclose(converter.upgrade(asbytes('0.')), 0.0)
        assert_equal(converter._status, 2 + status_offset)
        # test complex
        assert_equal(converter.upgrade(asbytes('0j')), complex('0j'))
        assert_equal(converter._status, 3 + status_offset)
        # test str: the final fallback, last entry in the mapper
        assert_equal(converter.upgrade(asbytes('a')), asbytes('a'))
        assert_equal(converter._status, len(converter._mapper) - 1)
    def test_missing(self):
        "Tests the use of missing values."
        converter = StringConverter(missing_values=(asbytes('missing'),
                                                    asbytes('missed')))
        converter.upgrade(asbytes('0'))
        assert_equal(converter(asbytes('0')), 0)
        # Empty and declared missing tokens convert to the default value.
        assert_equal(converter(asbytes('')), converter.default)
        assert_equal(converter(asbytes('missing')), converter.default)
        assert_equal(converter(asbytes('missed')), converter.default)
        # Tokens that are neither parseable nor missing raise ValueError.
        try:
            converter('miss')
        except ValueError:
            pass
    def test_upgrademapper(self):
        "Tests updatemapper"
        dateparser = _bytes_to_date
        # Register the date parser in the class-level mapper so instances
        # can upgrade to it.
        StringConverter.upgrade_mapper(dateparser, date(2000, 1, 1))
        convert = StringConverter(dateparser, date(2000, 1, 1))
        test = convert(asbytes('2001-01-01'))
        assert_equal(test, date(2001, 1, 1))
        test = convert(asbytes('2009-01-01'))
        assert_equal(test, date(2009, 1, 1))
        # An empty token yields the registered default.
        test = convert(asbytes(''))
        assert_equal(test, date(2000, 1, 1))
    def test_string_to_object(self):
        "Make sure that string-to-object functions are properly recognized"
        conv = StringConverter(_bytes_to_date)
        assert_equal(conv._mapper[-2][0](0), 0j)
        assert_(hasattr(conv, 'default'))
    def test_keep_default(self):
        "Make sure we don't lose an explicit default"
        converter = StringConverter(None, missing_values=asbytes(''),
                                    default=-999)
        converter.upgrade(asbytes('3.14159265'))
        # Upgrading to float must not clobber the explicit default.
        assert_equal(converter.default, -999)
        assert_equal(converter.type, np.dtype(float))
        #
        converter = StringConverter(
            None, missing_values=asbytes(''), default=0)
        converter.upgrade(asbytes('3.14159265'))
        assert_equal(converter.default, 0)
        assert_equal(converter.type, np.dtype(float))
    def test_keep_default_zero(self):
        "Check that we don't lose a default of 0"
        converter = StringConverter(int, default=0,
                                    missing_values=asbytes("N/A"))
        assert_equal(converter.default, 0)
    def test_keep_missing_values(self):
        "Check that we're not losing missing values"
        # The empty string is always implicitly treated as missing.
        converter = StringConverter(int, default=0,
                                    missing_values=asbytes("N/A"))
        assert_equal(
            converter.missing_values, set(asbytes_nested(['', 'N/A'])))
    def test_int64_dtype(self):
        "Check that int64 integer types can be specified"
        converter = StringConverter(np.int64, default=0)
        val = asbytes("-9223372036854775807")
        assert_(converter(val) == -9223372036854775807)
        val = asbytes("9223372036854775807")
        assert_(converter(val) == 9223372036854775807)
    def test_uint64_dtype(self):
        "Check that uint64 integer types can be specified"
        # This value exceeds int64 range, so it only parses as uint64.
        converter = StringConverter(np.uint64, default=0)
        val = asbytes("9223372043271415339")
        assert_(converter(val) == 9223372043271415339)
class TestMiscFunctions(TestCase):
    "Tests for has_nested_fields, easy_dtype and flatten_dtype."
    def test_has_nested_dtype(self):
        "Test has_nested_dtype"
        ndtype = np.dtype(np.float)
        assert_equal(has_nested_fields(ndtype), False)
        ndtype = np.dtype([('A', '|S3'), ('B', float)])
        assert_equal(has_nested_fields(ndtype), False)
        # Only dtypes whose fields themselves have fields count as nested.
        ndtype = np.dtype([('A', int), ('B', [('BA', float), ('BB', '|S1')])])
        assert_equal(has_nested_fields(ndtype), True)
    def test_easy_dtype(self):
        "Test ndtype on dtypes"
        # Simple case
        ndtype = float
        assert_equal(easy_dtype(ndtype), np.dtype(float))
        # As string w/o names
        ndtype = "i4, f8"
        assert_equal(easy_dtype(ndtype),
                     np.dtype([('f0', "i4"), ('f1', "f8")]))
        # As string w/o names but different default format
        assert_equal(easy_dtype(ndtype, defaultfmt="field_%03i"),
                     np.dtype([('field_000', "i4"), ('field_001', "f8")]))
        # As string w/ names
        ndtype = "i4, f8"
        assert_equal(easy_dtype(ndtype, names="a, b"),
                     np.dtype([('a', "i4"), ('b', "f8")]))
        # As string w/ names (too many): extras are dropped
        ndtype = "i4, f8"
        assert_equal(easy_dtype(ndtype, names="a, b, c"),
                     np.dtype([('a', "i4"), ('b', "f8")]))
        # As string w/ names (not enough): missing ones get defaults
        ndtype = "i4, f8"
        assert_equal(easy_dtype(ndtype, names=", b"),
                     np.dtype([('f0', "i4"), ('b', "f8")]))
        # ... (with different default format)
        assert_equal(easy_dtype(ndtype, names="a", defaultfmt="f%02i"),
                     np.dtype([('a', "i4"), ('f00', "f8")]))
        # As list of tuples w/o names
        ndtype = [('A', int), ('B', float)]
        assert_equal(easy_dtype(ndtype), np.dtype([('A', int), ('B', float)]))
        # As list of tuples w/ names
        assert_equal(easy_dtype(ndtype, names="a,b"),
                     np.dtype([('a', int), ('b', float)]))
        # As list of tuples w/ not enough names
        assert_equal(easy_dtype(ndtype, names="a"),
                     np.dtype([('a', int), ('f0', float)]))
        # As list of tuples w/ too many names
        assert_equal(easy_dtype(ndtype, names="a,b,c"),
                     np.dtype([('a', int), ('b', float)]))
        # As list of types w/o names
        ndtype = (int, float, float)
        assert_equal(easy_dtype(ndtype),
                     np.dtype([('f0', int), ('f1', float), ('f2', float)]))
        # As list of types w names
        ndtype = (int, float, float)
        assert_equal(easy_dtype(ndtype, names="a, b, c"),
                     np.dtype([('a', int), ('b', float), ('c', float)]))
        # As simple dtype w/ names: one field per provided name
        ndtype = np.dtype(float)
        assert_equal(easy_dtype(ndtype, names="a, b, c"),
                     np.dtype([(_, float) for _ in ('a', 'b', 'c')]))
        # As simple dtype w/o names (but multiple fields)
        ndtype = np.dtype(float)
        assert_equal(
            easy_dtype(ndtype, names=['', '', ''], defaultfmt="f%02i"),
            np.dtype([(_, float) for _ in ('f00', 'f01', 'f02')]))
    def test_flatten_dtype(self):
        "Testing flatten_dtype"
        # Standard dtype
        dt = np.dtype([("a", "f8"), ("b", "f8")])
        dt_flat = flatten_dtype(dt)
        assert_equal(dt_flat, [float, float])
        # Recursive dtype: nested fields are flattened depth-first
        dt = np.dtype([("a", [("aa", '|S1'), ("ab", '|S2')]), ("b", int)])
        dt_flat = flatten_dtype(dt)
        assert_equal(dt_flat, [np.dtype('|S1'), np.dtype('|S2'), int])
        # dtype with shaped fields
        dt = np.dtype([("a", (float, 2)), ("b", (int, 3))])
        dt_flat = flatten_dtype(dt)
        assert_equal(dt_flat, [float, int])
        # With the second argument true, shaped fields are repeated once
        # per element instead of once per field.
        dt_flat = flatten_dtype(dt, True)
        assert_equal(dt_flat, [float] * 2 + [int] * 3)
        # dtype w/ titles: titles are ignored when flattening
        dt = np.dtype([(("a", "A"), "f8"), (("b", "B"), "f8")])
        dt_flat = flatten_dtype(dt)
        assert_equal(dt_flat, [float, float])
# Allow running this test file directly (``python test_file.py``).
if __name__ == "__main__":
    run_module_suite()
|
unknown
|
codeparrot/codeparrot-clean
| ||
"""
Classes to represent the default SQL aggregate functions
"""
import copy
import warnings
from django.db.models.fields import FloatField, IntegerField
from django.db.models.query_utils import RegisterLookupMixin
from django.utils.deprecation import RemovedInDjango110Warning
from django.utils.functional import cached_property
__all__ = ['Aggregate', 'Avg', 'Count', 'Max', 'Min', 'StdDev', 'Sum', 'Variance']
warnings.warn(
"django.db.models.sql.aggregates is deprecated. Use "
"django.db.models.aggregates instead.",
RemovedInDjango110Warning, stacklevel=2)
class Aggregate(RegisterLookupMixin):
    """
    Default SQL Aggregate.
    """
    is_ordinal = False
    is_computed = False
    sql_template = '%(function)s(%(field)s)'
    def __init__(self, col, source=None, is_summary=False, **extra):
        """Instantiate an SQL aggregate
        * col is a column reference describing the subject field
          of the aggregate. It can be an alias, or a tuple describing
          a table and column name.
        * source is the underlying field or aggregate definition for
          the column reference. If the aggregate is not an ordinal or
          computed type, this reference is used to determine the coerced
          output type of the aggregate.
        * extra is a dictionary of additional data to provide for the
          aggregate definition
        Also utilizes the class variables:
        * sql_function, the name of the SQL function that implements the
          aggregate.
        * sql_template, a template string that is used to render the
          aggregate into SQL.
        * is_ordinal, a boolean indicating if the output of this aggregate
          is an integer (e.g., a count)
        * is_computed, a boolean indicating if this output of this aggregate
          is a computed float (e.g., an average), regardless of the input
          type.
        """
        self.col = col
        self.source = source
        self.is_summary = is_summary
        self.extra = extra
        # Follow the chain of aggregate sources back until you find an
        # actual field, or an aggregate that forces a particular output
        # type. This type of this field will be used to coerce values
        # retrieved from the database.
        tmp = self
        while tmp and isinstance(tmp, Aggregate):
            if getattr(tmp, 'is_ordinal', False):
                tmp = self._ordinal_aggregate_field
            elif getattr(tmp, 'is_computed', False):
                tmp = self._computed_aggregate_field
            else:
                tmp = tmp.source
        self.field = tmp
    # Two fake fields used to identify aggregate types in data-conversion operations.
    @cached_property
    def _ordinal_aggregate_field(self):
        return IntegerField()
    @cached_property
    def _computed_aggregate_field(self):
        return FloatField()
    def relabeled_clone(self, change_map):
        # Shallow-copy this aggregate and, for (table, column) references,
        # remap the table alias through change_map.
        clone = copy.copy(self)
        if isinstance(self.col, (list, tuple)):
            clone.col = (change_map.get(self.col[0], self.col[0]), self.col[1])
        return clone
    def as_sql(self, compiler, connection):
        "Return the aggregate, rendered as SQL with parameters."
        params = []
        if hasattr(self.col, 'as_sql'):
            # col is itself SQL-compilable (e.g. another expression).
            field_name, params = self.col.as_sql(compiler, connection)
        elif isinstance(self.col, (list, tuple)):
            # (table, column) pair -> "table"."column"
            field_name = '.'.join(compiler(c) for c in self.col)
        else:
            field_name = compiler(self.col)
        substitutions = {
            'function': self.sql_function,
            'field': field_name
        }
        # extra may override or extend the template substitutions.
        substitutions.update(self.extra)
        return self.sql_template % substitutions, params
    def get_group_by_cols(self):
        # Aggregates never contribute columns to GROUP BY.
        return []
    @property
    def output_field(self):
        return self.field
class Avg(Aggregate):
    """SQL AVG aggregate; always coerced to float regardless of input type."""
    is_computed = True
    sql_function = 'AVG'
class Count(Aggregate):
    """SQL COUNT aggregate; yields an integer, optionally counting DISTINCT values."""
    is_ordinal = True
    sql_function = 'COUNT'
    sql_template = '%(function)s(%(distinct)s%(field)s)'
    def __init__(self, col, distinct=False, **extra):
        # Translate the boolean into the SQL qualifier substituted into the
        # %(distinct)s slot of sql_template.
        if distinct:
            qualifier = 'DISTINCT '
        else:
            qualifier = ''
        super(Count, self).__init__(col, distinct=qualifier, **extra)
class Max(Aggregate):
    """SQL MAX aggregate; output type follows the source field."""
    sql_function = 'MAX'
class Min(Aggregate):
    """SQL MIN aggregate; output type follows the source field."""
    sql_function = 'MIN'
class StdDev(Aggregate):
    """SQL standard-deviation aggregate (sample or population variant)."""
    is_computed = True
    def __init__(self, col, sample=False, **extra):
        super(StdDev, self).__init__(col, **extra)
        # Pick the SQL function at construction time based on the variant.
        if sample:
            self.sql_function = 'STDDEV_SAMP'
        else:
            self.sql_function = 'STDDEV_POP'
class Sum(Aggregate):
    """SQL SUM aggregate; output type follows the source field."""
    sql_function = 'SUM'
class Variance(Aggregate):
    """SQL variance aggregate (sample or population variant)."""
    is_computed = True
    def __init__(self, col, sample=False, **extra):
        super(Variance, self).__init__(col, **extra)
        # Pick the SQL function at construction time based on the variant.
        if sample:
            self.sql_function = 'VAR_SAMP'
        else:
            self.sql_function = 'VAR_POP'
|
unknown
|
codeparrot/codeparrot-clean
| ||
# Copyright (c) 2015 by Ron Frederick <ronf@timeheart.net>.
# All rights reserved.
#
# This program and the accompanying materials are made available under
# the terms of the Eclipse Public License v1.0 which accompanies this
# distribution and is available at:
#
# http://www.eclipse.org/legal/epl-v10.html
#
# Contributors:
# Ron Frederick - initial implementation, API, and documentation
"""Ed25519 public key encryption handler"""
from .packet import String, SSHPacket
from .public_key import SSHKey, SSHCertificateV01, KeyExportError
from .public_key import register_public_key_alg, register_certificate_alg
# Short variable names are used here, matching names in the spec
# pylint: disable=invalid-name
class _Ed25519Key(SSHKey):
    """Handler for Ed25519 public key encryption"""
    algorithm = b'ssh-ed25519'
    def __init__(self, vk, sk):
        # vk is the verifying (public) key bytes; sk is the signing
        # (private) key bytes, or None for a public-only key.
        self._vk = vk
        self._sk = sk
    def __eq__(self, other):
        # This isn't protected access - both objects are _Ed25519Key instances
        # pylint: disable=protected-access
        return (isinstance(other, self.__class__) and
                self._vk == other._vk and self._sk == other._sk)
    def __hash__(self):
        return hash(self._vk)
    @classmethod
    def make_private(cls, vk, sk):
        """Construct an Ed25519 private key"""
        return cls(vk, sk)
    @classmethod
    def make_public(cls, vk):
        """Construct an Ed25519 public key"""
        return cls(vk, None)
    @classmethod
    def decode_ssh_private(cls, packet):
        """Decode an SSH format Ed25519 private key"""
        vk = packet.get_string()
        sk = packet.get_string()
        return vk, sk
    @classmethod
    def decode_ssh_public(cls, packet):
        """Decode an SSH format Ed25519 public key"""
        vk = packet.get_string()
        return (vk,)
    def encode_ssh_private(self):
        """Encode an SSH format Ed25519 private key"""
        if self._sk is None:
            raise KeyExportError('Key is not private')
        return b''.join((String(self.algorithm), String(self._vk),
                         String(self._sk)))
    def encode_ssh_public(self):
        """Encode an SSH format Ed25519 public key"""
        return b''.join((String(self.algorithm), String(self._vk)))
    def sign(self, data):
        """Return a signature of the specified data using this key"""
        # NOTE: libnacl is imported at the bottom of this module; the name
        # resolves at call time, and this class is only registered when that
        # import succeeded.
        if self._sk is None:
            raise ValueError('Private key needed for signing')
        # crypto_sign returns signature || message; strip the trailing
        # message to keep only the signature bytes.
        sig = libnacl.crypto_sign(data, self._sk)
        return b''.join((String(self.algorithm), String(sig[:-len(data)])))
    def verify(self, data, sig):
        """Verify a signature of the specified data using this key"""
        packet = SSHPacket(sig)
        if packet.get_string() != self.algorithm:
            return False
        sig = packet.get_string()
        packet.check_end()
        try:
            # crypto_sign_open recovers the signed message; compare it to
            # the expected data, treating any decoding failure as invalid.
            return libnacl.crypto_sign_open(sig + data, self._vk) == data
        except ValueError:
            return False
# libnacl is an optional dependency: only register the Ed25519 algorithms
# when it imports cleanly.  OSError is caught as well because loading the
# underlying libsodium shared library can fail at import time.
try:
    import libnacl
except (ImportError, OSError):
    pass
else:
    register_public_key_alg(b'ssh-ed25519', _Ed25519Key)
    register_certificate_alg(b'ssh-ed25519-cert-v01@openssh.com',
                             _Ed25519Key, SSHCertificateV01)
|
unknown
|
codeparrot/codeparrot-clean
| ||
//// [tests/cases/conformance/async/es6/awaitCallExpression/awaitCallExpression2_es6.ts] ////
//// [awaitCallExpression2_es6.ts]
declare var a: boolean;
declare var p: Promise<boolean>;
declare function fn(arg0: boolean, arg1: boolean, arg2: boolean): void;
declare var o: { fn(arg0: boolean, arg1: boolean, arg2: boolean): void; };
declare var pfn: Promise<{ (arg0: boolean, arg1: boolean, arg2: boolean): void; }>;
declare var po: Promise<{ fn(arg0: boolean, arg1: boolean, arg2: boolean): void; }>;
declare function before(): void;
declare function after(): void;
async function func(): Promise<void> {
before();
var b = fn(await p, a, a);
after();
}
//// [awaitCallExpression2_es6.js]
"use strict";
function func() {
return __awaiter(this, void 0, void 0, function* () {
before();
var b = fn(yield p, a, a);
after();
});
}
|
javascript
|
github
|
https://github.com/microsoft/TypeScript
|
tests/baselines/reference/awaitCallExpression2_es6.js
|
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Tests for L{twisted.words.protocols.jabber.jid}.
"""
from twisted.trial import unittest
from twisted.words.protocols.jabber import jid
class JIDParsingTest(unittest.TestCase):
    """
    Tests for parsing and preparation of JID strings.
    """
    def test_parse(self):
        """
        Test different forms of JIDs.
        """
        # Basic forms
        self.assertEquals(jid.parse("user@host/resource"),
                          ("user", "host", "resource"))
        self.assertEquals(jid.parse("user@host"),
                          ("user", "host", None))
        self.assertEquals(jid.parse("host"),
                          (None, "host", None))
        self.assertEquals(jid.parse("host/resource"),
                          (None, "host", "resource"))
        # More interesting forms: '@' and '/' after the first '/' belong
        # to the resource part and are not special.
        self.assertEquals(jid.parse("foo/bar@baz"),
                          (None, "foo", "bar@baz"))
        self.assertEquals(jid.parse("boo@foo/bar@baz"),
                          ("boo", "foo", "bar@baz"))
        self.assertEquals(jid.parse("boo@foo/bar/baz"),
                          ("boo", "foo", "bar/baz"))
        self.assertEquals(jid.parse("boo/foo@bar@baz"),
                          (None, "boo", "foo@bar@baz"))
        self.assertEquals(jid.parse("boo/foo/bar"),
                          (None, "boo", "foo/bar"))
        self.assertEquals(jid.parse("boo//foo"),
                          (None, "boo", "/foo"))
    def test_noHost(self):
        """
        Test for failure on no host part.
        """
        self.assertRaises(jid.InvalidFormat, jid.parse, "user@")
    def test_doubleAt(self):
        """
        Test for failure on double @ signs.
        This should fail because @ is not a valid character for the host
        part of the JID.
        """
        self.assertRaises(jid.InvalidFormat, jid.parse, "user@@host")
    def test_multipleAt(self):
        """
        Test for failure on two @ signs.
        This should fail because @ is not a valid character for the host
        part of the JID.
        """
        self.assertRaises(jid.InvalidFormat, jid.parse, "user@host@host")
    # Basic tests for case mapping. These are fallback tests for the
    # prepping done in twisted.words.protocols.jabber.xmpp_stringprep
    def test_prepCaseMapUser(self):
        """
        Test case mapping of the user part of the JID.
        """
        self.assertEquals(jid.prep("UsEr", "host", "resource"),
                          ("user", "host", "resource"))
    def test_prepCaseMapHost(self):
        """
        Test case mapping of the host part of the JID.
        """
        self.assertEquals(jid.prep("user", "hoST", "resource"),
                          ("user", "host", "resource"))
    def test_prepNoCaseMapResource(self):
        """
        Test no case mapping of the resource part of the JID.
        """
        self.assertEquals(jid.prep("user", "hoST", "resource"),
                          ("user", "host", "resource"))
        # Resources are case sensitive, so this must NOT compare equal.
        self.assertNotEquals(jid.prep("user", "host", "Resource"),
                             ("user", "host", "resource"))
class JIDTest(unittest.TestCase):
    """
    Tests for the JID class.
    """
    def test_noneArguments(self):
        """
        Test that using no arguments raises an exception.
        """
        self.assertRaises(RuntimeError, jid.JID)
    def test_attributes(self):
        """
        Test that the attributes correspond with the JID parts.
        """
        j = jid.JID("user@host/resource")
        self.assertEquals(j.user, "user")
        self.assertEquals(j.host, "host")
        self.assertEquals(j.resource, "resource")
    def test_userhost(self):
        """
        Test the extraction of the bare JID.
        """
        j = jid.JID("user@host/resource")
        self.assertEquals("user@host", j.userhost())
    def test_userhostOnlyHost(self):
        """
        Test the extraction of the bare JID of the full form host/resource.
        """
        j = jid.JID("host/resource")
        self.assertEquals("host", j.userhost())
    def test_userhostJID(self):
        """
        Test getting a JID object of the bare JID.
        """
        j1 = jid.JID("user@host/resource")
        j2 = jid.internJID("user@host")
        # userhostJID() returns the interned bare-JID instance.
        self.assertIdentical(j2, j1.userhostJID())
    def test_userhostJIDNoResource(self):
        """
        Test getting a JID object of the bare JID when there was no resource.
        """
        j = jid.JID("user@host")
        self.assertIdentical(j, j.userhostJID())
    def test_fullHost(self):
        """
        Test giving a string representation of the JID with only a host part.
        """
        j = jid.JID(tuple=(None, 'host', None))
        self.assertEqual('host', j.full())
    def test_fullHostResource(self):
        """
        Test giving a string representation of the JID with host, resource.
        """
        j = jid.JID(tuple=(None, 'host', 'resource'))
        self.assertEqual('host/resource', j.full())
    def test_fullUserHost(self):
        """
        Test giving a string representation of the JID with user, host.
        """
        j = jid.JID(tuple=('user', 'host', None))
        self.assertEqual('user@host', j.full())
    def test_fullAll(self):
        """
        Test giving a string representation of the JID.
        """
        j = jid.JID(tuple=('user', 'host', 'resource'))
        self.assertEqual('user@host/resource', j.full())
    def test_equality(self):
        """
        Test JID equality.
        """
        j1 = jid.JID("user@host/resource")
        j2 = jid.JID("user@host/resource")
        # Distinct objects that nevertheless compare equal.
        self.assertNotIdentical(j1, j2)
        self.assertEqual(j1, j2)
    def test_equalityWithNonJIDs(self):
        """
        Test that a JID never compares equal to a non-JID object.
        """
        j = jid.JID("user@host/resource")
        self.assertFalse(j == 'user@host/resource')
    def test_inequality(self):
        """
        Test JID inequality.
        """
        j1 = jid.JID("user1@host/resource")
        j2 = jid.JID("user2@host/resource")
        self.assertNotEqual(j1, j2)
    def test_inequalityWithNonJIDs(self):
        """
        Test JID inequality with non-JID objects.
        """
        j = jid.JID("user@host/resource")
        self.assertNotEqual(j, 'user@host/resource')
    def test_hashable(self):
        """
        Test JID hashability.
        """
        j1 = jid.JID("user@host/resource")
        j2 = jid.JID("user@host/resource")
        # Equal JIDs must hash equal so they can be used as dict keys.
        self.assertEqual(hash(j1), hash(j2))
    def test_unicode(self):
        """
        Test unicode representation of JIDs.
        """
        j = jid.JID(tuple=('user', 'host', 'resource'))
        self.assertEquals("user@host/resource", unicode(j))
    def test_repr(self):
        """
        Test representation of JID objects.
        """
        j = jid.JID(tuple=('user', 'host', 'resource'))
        self.assertEquals("JID(u'user@host/resource')", repr(j))
class InternJIDTest(unittest.TestCase):
    """
    Tests for the JID interning cache.
    """
    def test_identity(self):
        """
        Test that two interned JIDs yield the same object.
        """
        j1 = jid.internJID("user@host")
        j2 = jid.internJID("user@host")
        self.assertIdentical(j1, j2)
|
unknown
|
codeparrot/codeparrot-clean
| ||
#!/bin/sh
# Test that git can write GIT_TRACE output to a Windows named pipe.
test_description='Windows named pipes'
. ./test-lib.sh
# These tests only make sense on Windows (MINGW prerequisite).
if ! test_have_prereq MINGW
then
	skip_all='skipping Windows-specific tests'
	test_done
fi
# Capture the trace output of "git status" to a regular file, then start a
# named-pipe server (test-tool), point GIT_TRACE at the pipe path, and check
# the pipe receives the same trace output as the file did.
test_expect_success 'o_append write to named pipe' '
	GIT_TRACE="$(pwd)/expect" git status >/dev/null 2>&1 &&
	{ test-tool windows-named-pipe t0051 >actual 2>&1 & } &&
	pid=$! &&
	sleep 1 &&
	GIT_TRACE=//./pipe/t0051 git status >/dev/null 2>warning &&
	wait $pid &&
	test_cmp expect actual
'
test_done
|
unknown
|
github
|
https://github.com/git/git
|
t/t0051-windows-named-pipe.sh
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.common.errors;
/**
 * Thrown when a topic ID is found to be inconsistent with the metadata known
 * to the client.  Extends {@link InvalidMetadataException}, so it is treated
 * as retriable after a metadata refresh.
 */
public class InconsistentTopicIdException extends InvalidMetadataException {
    private static final long serialVersionUID = 1L;
    public InconsistentTopicIdException(String message) {
        super(message);
    }
}
|
java
|
github
|
https://github.com/apache/kafka
|
clients/src/main/java/org/apache/kafka/common/errors/InconsistentTopicIdException.java
|
//// [tests/cases/compiler/ClassDeclaration21.ts] ////
//// [ClassDeclaration21.ts]
class C {
0();
1() { }
}
//// [ClassDeclaration21.js]
"use strict";
class C {
1() { }
}
|
javascript
|
github
|
https://github.com/microsoft/TypeScript
|
tests/baselines/reference/ClassDeclaration21.js
|
"Commonly-used date structures"
from google.appengine._internal.django.utils.translation import ugettext_lazy as _
# Full weekday names, keyed by date.weekday() index (0 = Monday).
WEEKDAYS = {
    0:_('Monday'), 1:_('Tuesday'), 2:_('Wednesday'), 3:_('Thursday'), 4:_('Friday'),
    5:_('Saturday'), 6:_('Sunday')
}
# Three-letter weekday abbreviations, same indexing as WEEKDAYS.
WEEKDAYS_ABBR = {
    0:_('Mon'), 1:_('Tue'), 2:_('Wed'), 3:_('Thu'), 4:_('Fri'),
    5:_('Sat'), 6:_('Sun')
}
# Reverse lookup: lowercase English weekday name -> weekday() index.
WEEKDAYS_REV = {
    'monday':0, 'tuesday':1, 'wednesday':2, 'thursday':3, 'friday':4,
    'saturday':5, 'sunday':6
}
# Full month names, keyed by month number (1 = January).
MONTHS = {
    1:_('January'), 2:_('February'), 3:_('March'), 4:_('April'), 5:_('May'), 6:_('June'),
    7:_('July'), 8:_('August'), 9:_('September'), 10:_('October'), 11:_('November'),
    12:_('December')
}
# Lowercase three-letter month abbreviations, keyed by month number.
MONTHS_3 = {
    1:_('jan'), 2:_('feb'), 3:_('mar'), 4:_('apr'), 5:_('may'), 6:_('jun'),
    7:_('jul'), 8:_('aug'), 9:_('sep'), 10:_('oct'), 11:_('nov'), 12:_('dec')
}
# Reverse lookup: three-letter abbreviation -> month number.
MONTHS_3_REV = {
    'jan':1, 'feb':2, 'mar':3, 'apr':4, 'may':5, 'jun':6, 'jul':7, 'aug':8,
    'sep':9, 'oct':10, 'nov':11, 'dec':12
}
MONTHS_AP = { # month names in Associated Press style
    1:_('Jan.'), 2:_('Feb.'), 3:_('March'), 4:_('April'), 5:_('May'), 6:_('June'), 7:_('July'),
    8:_('Aug.'), 9:_('Sept.'), 10:_('Oct.'), 11:_('Nov.'), 12:_('Dec.')
}
|
unknown
|
codeparrot/codeparrot-clean
| ||
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.common.message;
import org.apache.kafka.common.errors.UnsupportedVersionException;
import org.apache.kafka.common.protocol.ApiKeys;
import org.apache.kafka.common.protocol.types.Schema;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.Timeout;
import java.util.HashSet;
import java.util.Set;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertFalse;
import static org.junit.jupiter.api.Assertions.assertNotNull;
import static org.junit.jupiter.api.Assertions.assertNull;
import static org.junit.jupiter.api.Assertions.assertThrows;
import static org.junit.jupiter.api.Assertions.assertTrue;
import static org.junit.jupiter.api.Assertions.fail;
@Timeout(120)
public class ApiMessageTypeTest {
    @Test
    public void testFromApiKey() {
        // Round-trip check: every enum constant must be retrievable from
        // its own api key.
        for (ApiMessageType type : ApiMessageType.values()) {
            ApiMessageType type2 = ApiMessageType.fromApiKey(type.apiKey());
            assertEquals(type2, type);
        }
    }
@Test
public void testInvalidFromApiKey() {
try {
ApiMessageType.fromApiKey((short) -1);
fail("expected to get an UnsupportedVersionException");
} catch (UnsupportedVersionException uve) {
// expected
}
}
    @Test
    public void testUniqueness() {
        // Verify that no two message types share an api key, a request class
        // name, or a response class name.
        Set<Short> ids = new HashSet<>();
        Set<String> requestNames = new HashSet<>();
        Set<String> responseNames = new HashSet<>();
        int apiKeysWithNoValidVersionCount = 0;
        for (ApiMessageType type : ApiMessageType.values()) {
            assertFalse(ids.contains(type.apiKey()),
                "found two ApiMessageType objects with id " + type.apiKey());
            ids.add(type.apiKey());
            ApiKeys apiKey = ApiKeys.forId(type.apiKey());
            if (apiKey.hasValidVersion()) {
                String requestName = type.newRequest().getClass().getSimpleName();
                assertFalse(requestNames.contains(requestName),
                    "found two ApiMessageType objects with requestName " + requestName);
                requestNames.add(requestName);
                String responseName = type.newResponse().getClass().getSimpleName();
                assertFalse(responseNames.contains(responseName),
                    "found two ApiMessageType objects with responseName " + responseName);
                responseNames.add(responseName);
            } else
                // Types with no valid version are excluded from the
                // request/response name counts below.
                ++apiKeysWithNoValidVersionCount;
        }
        assertEquals(ApiMessageType.values().length, ids.size());
        int expectedNamesCount = ApiMessageType.values().length - apiKeysWithNoValidVersionCount;
        assertEquals(expectedNamesCount, requestNames.size());
        assertEquals(expectedNamesCount, responseNames.size());
    }
@Test
public void testHeaderVersion() {
assertEquals((short) 1, ApiMessageType.PRODUCE.requestHeaderVersion((short) 0));
assertEquals((short) 0, ApiMessageType.PRODUCE.responseHeaderVersion((short) 0));
assertEquals((short) 1, ApiMessageType.PRODUCE.requestHeaderVersion((short) 1));
assertEquals((short) 0, ApiMessageType.PRODUCE.responseHeaderVersion((short) 1));
assertEquals((short) 1, ApiMessageType.CREATE_TOPICS.requestHeaderVersion((short) 4));
assertEquals((short) 0, ApiMessageType.CREATE_TOPICS.responseHeaderVersion((short) 4));
assertEquals((short) 2, ApiMessageType.CREATE_TOPICS.requestHeaderVersion((short) 5));
assertEquals((short) 1, ApiMessageType.CREATE_TOPICS.responseHeaderVersion((short) 5));
}
@Test
public void testHeaderVersionWithNoValidVersion() {
for (ApiMessageType messageType : ApiMessageType.values()) {
if (messageType.lowestSupportedVersion() > messageType.highestSupportedVersion(true)) {
assertThrows(UnsupportedVersionException.class, () -> messageType.requestHeaderVersion((short) 0));
assertThrows(UnsupportedVersionException.class, () -> messageType.responseHeaderVersion((short) 0));
}
}
}
@Test
public void testAllVersionsHaveSchemas() {
for (ApiMessageType type : ApiMessageType.values()) {
assertTrue(type.lowestSupportedVersion() >= 0);
assertEquals(type.requestSchemas().length, type.responseSchemas().length,
"request and response schemas must be the same length for " + type.name());
for (int i = 0; i < type.requestSchemas().length; ++i) {
Schema schema = type.requestSchemas()[i];
if (i >= type.lowestSupportedVersion())
assertNotNull(schema);
else
assertNull(schema);
}
for (int i = 0; i < type.responseSchemas().length; ++i) {
Schema schema = type.responseSchemas()[i];
if (i >= type.lowestSupportedVersion())
assertNotNull(schema);
else
assertNull(schema);
}
assertEquals(type.highestSupportedVersion(true) + 1, type.requestSchemas().length);
}
}
@Test
public void testApiIdsArePositive() {
for (ApiMessageType type : ApiMessageType.values())
assertTrue(type.apiKey() >= 0);
}
}
|
java
|
github
|
https://github.com/apache/kafka
|
clients/src/test/java/org/apache/kafka/common/message/ApiMessageTypeTest.java
|
# -*- encoding: utf-8 -*-
#
#
# OpenERP, Open Source Management Solution
# This module copyright (C) 2013 Savoir-faire Linux
# (<http://www.savoirfairelinux.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#
from openerp.tests.common import TransactionCase
from openerp.models import BaseModel
from datetime import date
class Base_Test_passport(TransactionCase):
    """
    Simple test creating a passport.

    This is a base class for passport test cases.
    Inherit from this and setup values.
    """
    def setUp(self, vals=None):
        """Create a passport record from defaults merged with *vals*."""
        if vals is None:
            vals = {}
        # Default test values; subclasses may override any of them via *vals*.
        self.vals = {'name': 'This is a test passport name',
                     'number': 'A200124789',
                     'country_id': 1,
                     'expiration_date': date(2013, 11, 14),
                     'birth_date': date(1980, 11, 21),
                     'gender': 'male',
                     }
        super(Base_Test_passport, self).setUp()
        # Merge caller overrides into the defaults.  dict.update() works on
        # both Python 2 and 3, unlike dict(a.items() + b.items()) which
        # fails on Python 3 where items() returns a view, not a list.
        self.vals.update(vals)
        # Create the passport object; we will be testing this, so store in self
        res_passport = self.registry('res.passport')
        self.passport_id = res_passport.create(
            self.cr, self.uid, self.vals, context=None)
    def test_passport(self):
        """Check that every value written at creation is read back unchanged."""
        res_passport = self.registry('res.passport')
        passport_obj = res_passport.browse(
            self.cr, self.uid, self.passport_id, context=None)
        for field in self.vals:
            val = passport_obj[field]
            if isinstance(val, BaseModel):
                # Relational field: compare the stored id, not the record.
                # assertEqual: assertEquals is a deprecated unittest alias.
                self.assertEqual(self.vals[field], val.id,
                                 "IDs for %s don't match: (%i != %i)" %
                                 (field, self.vals[field], val.id))
            else:
                self.assertEqual(str(self.vals[field]), str(val),
                                 "Values for %s don't match: (%s != %s)" %
                                 (field, str(self.vals[field]), str(val)))
class Test_passport_bad(Base_Test_passport):
    """
    Simple test creating a passport, test against bad values
    """
    def setUp(self):
        """Create the passport with defaults, then swap in mismatching values."""
        super(Test_passport_bad, self).setUp()
        # Deliberately wrong values: none of these match the stored record.
        self.vals = dict(
            name='This is the wrong passport name',
            number='A111111111',
            country_id=0,
            expiration_date=date(1999, 11, 14),
            birth_date=date(1999, 11, 21),
            gender='',
        )
    def test_passport(self):
        """Every comparison against the stored record must fail."""
        passports = self.registry('res.passport')
        record = passports.browse(
            self.cr, self.uid, self.passport_id, context=None)
        for field in self.vals:
            stored = record[field]
            expected = self.vals[field]
            if isinstance(stored, BaseModel):
                # Relational field: compare ids, which must differ.
                self.assertNotEqual(expected, stored.id,
                                    "IDs for %s don't match: (%i != %i)" %
                                    (field, expected, stored.id))
            else:
                self.assertNotEqual(str(expected), str(stored),
                                    "Values for %s don't match: (%s != %s)" %
                                    (field, str(expected), str(stored)))
class Test_passport_name_get(TransactionCase):
    """
    Test name_get
    """
    def setUp(self):
        """
        Create passports with name and country, name only, country only,
        and neither, so every name_get() branch is exercised.
        """
        super(Test_passport_name_get, self).setUp()
        res_passport = self.registry('res.passport')
        res_country = self.registry('res.country')
        country = res_country.browse(self.cr, self.uid, 1, context=None)
        self.name_on_passport = 'test name'
        self.country_name = country.name_get()[0][1]
        self.both = res_passport.create(
            self.cr, self.uid, {'name': self.name_on_passport,
                                'country_id': country.id, },
            context=None)
        self.name_only = res_passport.create(
            self.cr, self.uid, {'name': self.name_on_passport, },
            context=None)
        self.country_only = res_passport.create(
            self.cr, self.uid, {'country_id': country.id, },
            context=None)
        self.neither = res_passport.create(
            self.cr, self.uid, {},
            context=None)
    def test_passport(self):
        """
        name_get() must join country name and passport name with ' | ',
        fall back to whichever part exists, and return '' when neither does.
        """
        res_passport = self.registry('res.passport')
        both_obj = res_passport.browse(
            self.cr, self.uid, self.both, context=None)
        name_only = res_passport.browse(
            self.cr, self.uid, self.name_only, context=None)
        country_only = res_passport.browse(
            self.cr, self.uid, self.country_only, context=None)
        neither = res_passport.browse(
            self.cr, self.uid, self.neither, context=None)
        # assertEqual: assertEquals is a deprecated unittest alias.
        self.assertEqual(
            both_obj.name_get()[0][1],
            ' | '.join((self.country_name, self.name_on_passport)),
            'Error in passport name_get() with both country name and name on '
            'passport.'
        )
        self.assertEqual(
            name_only.name_get()[0][1], self.name_on_passport,
            'Error in passport name_get() with only name on passport.'
        )
        self.assertEqual(
            country_only.name_get()[0][1], self.country_name,
            'Error in passport name_get() with only name of country.'
        )
        self.assertEqual(
            neither.name_get()[0][1], '',
            'Error in passport name_get() with neither country name nor name '
            'on passport.'
        )
|
unknown
|
codeparrot/codeparrot-clean
| ||
"""Arcam component."""
import asyncio
from contextlib import suppress
import logging
from arcam.fmj import ConnectionFailed
from arcam.fmj.client import Client
import async_timeout
from homeassistant import config_entries
from homeassistant.const import CONF_HOST, CONF_PORT, EVENT_HOMEASSISTANT_STOP
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.typing import ConfigType, HomeAssistantType
from .const import (
DEFAULT_SCAN_INTERVAL,
DOMAIN,
DOMAIN_DATA_ENTRIES,
DOMAIN_DATA_TASKS,
SIGNAL_CLIENT_DATA,
SIGNAL_CLIENT_STARTED,
SIGNAL_CLIENT_STOPPED,
)
# Module-level logger named after this integration's import path.
_LOGGER = logging.getLogger(__name__)
# YAML configuration for this domain is deprecated; setup is via config entries.
CONFIG_SCHEMA = cv.deprecated(DOMAIN)
async def _await_cancel(task):
task.cancel()
with suppress(asyncio.CancelledError):
await task
async def async_setup(hass: HomeAssistantType, config: ConfigType):
    """Set up the component."""
    # Per-entry client objects and their background reader tasks.
    hass.data[DOMAIN_DATA_ENTRIES] = {}
    hass.data[DOMAIN_DATA_TASKS] = {}

    async def _stop(_):
        # Cancel every running client task when Home Assistant shuts down.
        pending = hass.data[DOMAIN_DATA_TASKS].values()
        asyncio.gather(*(_await_cancel(task) for task in pending))

    hass.bus.async_listen_once(EVENT_HOMEASSISTANT_STOP, _stop)
    return True
async def async_setup_entry(hass: HomeAssistantType, entry: config_entries.ConfigEntry):
    """Set up config entry."""
    # One Arcam client per config entry, keyed by entry id.
    client = Client(entry.data[CONF_HOST], entry.data[CONF_PORT])
    hass.data[DOMAIN_DATA_ENTRIES][entry.entry_id] = client
    # Background task that keeps the client connected and dispatches events.
    hass.data[DOMAIN_DATA_TASKS][entry.entry_id] = asyncio.create_task(
        _run_client(hass, client, DEFAULT_SCAN_INTERVAL)
    )
    hass.async_create_task(
        hass.config_entries.async_forward_entry_setup(entry, "media_player")
    )
    return True
async def async_unload_entry(hass, entry):
    """Cleanup before removing config entry."""
    # Tear down the platform first, then stop the background client task.
    await hass.config_entries.async_forward_entry_unload(entry, "media_player")
    client_task = hass.data[DOMAIN_DATA_TASKS].pop(entry.entry_id)
    await _await_cancel(client_task)
    del hass.data[DOMAIN_DATA_ENTRIES][entry.entry_id]
    return True
async def _run_client(hass, client, interval):
    """Keep *client* connected forever, signalling lifecycle via dispatcher.

    Reconnects after ConnectionFailed (sleeping *interval* seconds) and
    retries immediately on connect timeout.  Returns only on an unexpected
    error, aborting this entry's client.
    """
    def _listen(_):
        # Forward every received packet as a data signal for the platform.
        hass.helpers.dispatcher.async_dispatcher_send(SIGNAL_CLIENT_DATA, client.host)
    while True:
        try:
            # Bound only the connection attempt; process() may run indefinitely.
            # NOTE(review): sync `with async_timeout.timeout(...)` is the
            # legacy form — newer async_timeout requires `async with`; confirm
            # against the pinned async_timeout version.
            with async_timeout.timeout(interval):
                await client.start()
            _LOGGER.debug("Client connected %s", client.host)
            hass.helpers.dispatcher.async_dispatcher_send(
                SIGNAL_CLIENT_STARTED, client.host
            )
            try:
                with client.listen(_listen):
                    await client.process()
            finally:
                # Always stop the client and announce the disconnect, even if
                # process() raised.
                await client.stop()
                _LOGGER.debug("Client disconnected %s", client.host)
                hass.helpers.dispatcher.async_dispatcher_send(
                    SIGNAL_CLIENT_STOPPED, client.host
                )
        except ConnectionFailed:
            # Device unreachable: back off before reconnecting.
            await asyncio.sleep(interval)
        except asyncio.TimeoutError:
            # Connect attempt timed out: retry at once.
            continue
        except Exception:  # pylint: disable=broad-except
            _LOGGER.exception("Unexpected exception, aborting arcam client")
            return
|
unknown
|
codeparrot/codeparrot-clean
| ||
// Boost.Function library
// Copyright Douglas Gregor 2002-2003. Use, modification and
// distribution is subject to the Boost Software License, Version
// 1.0. (See accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
// For more information, see http://www.boost.org
#include <boost/function/function_template.hpp>
|
unknown
|
github
|
https://github.com/mysql/mysql-server
|
extra/boost/boost_1_87_0/boost/function/function0.hpp
|
# (c) 2013, Jayson Vantuyl <jayson@aggressive.ly>
# (c) 2012-17 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = """
lookup: sequence
author: Jayson Vantuyl <jayson@aggressive.ly>
version_added: "1.0"
short_description: generate a list based on a number sequence
description:
- generates a sequence of items. You can specify a start value, an end value, an optional "stride" value that specifies the number of steps
to increment the sequence, and an optional printf-style format string.
- 'Arguments can be specified as key=value pair strings or as a shortcut form of the arguments string is also accepted: [start-]end[/stride][:format].'
- 'Numerical values can be specified in decimal, hexadecimal (0x3f8) or octal (0600).'
- Starting at version 1.9.2, negative strides are allowed.
options:
start:
description: number at which to start the sequence
default: 0
type: number
end:
description: number at which to end the sequence, dont use this with count
type: number
default: 0
count:
description: number of elements in the sequence, this is not to be used with end
type: number
default: 0
stride:
description: increments between sequence numbers, the default is 1 unless the end is less than the start, then it is -1.
type: number
format:
description: return a string with the generated number formatted in
"""
EXAMPLES = """
- name: create some test users
user:
name: "{{ item }}"
state: present
groups: "evens"
with_sequence: start=0 end=32 format=testuser%02x
- name: create a series of directories with even numbers for some reason
file:
dest: "/var/stuff/{{ item }}"
state: directory
with_sequence: start=4 end=16 stride=2
- name: a simpler way to use the sequence plugin create 4 groups
group:
name: "group{{ item }}"
state: present
with_sequence: count=4
- name: the final countdown
debug: msg={{item}} seconds to detonation
with_sequence: end=0 start=10
"""
RETURN = """
_list:
description: generated sequence of numbers or strings
"""
from re import compile as re_compile, IGNORECASE
from ansible.errors import AnsibleError
from ansible.module_utils.six.moves import xrange
from ansible.parsing.splitter import parse_kv
from ansible.plugins.lookup import LookupBase
# shortcut format: [start-]end[/stride][:format]
# NUM accepts decimal, hex (0x..) and octal (0..) literals; parsed with int(x, 0).
NUM = "(0?x?[0-9a-f]+)"
# NOTE(review): the "Group N" comments below are zero-based labels; in
# match.groups() terms these are groups 1-7 (see parse_simple_args).
SHORTCUT = re_compile(
    "^(" + # Group 0
    NUM + # Group 1: Start
    "-)?" +
    NUM + # Group 2: End
    "(/" + # Group 3
    NUM + # Group 4: Stride
    ")?" +
    "(:(.+))?$", # Group 5, Group 6: Format String
    IGNORECASE
)
class LookupModule(LookupBase):
    """
    sequence lookup module
    Used to generate some sequence of items. Takes arguments in two forms.
    The simple / shortcut form is:
    [start-]end[/stride][:format]
    As indicated by the brackets: start, stride, and format string are all
    optional. The format string is in the style of printf. This can be used
    to pad with zeros, format in hexadecimal, etc. All of the numerical values
    can be specified in octal (i.e. 0664) or hexadecimal (i.e. 0x3f8).
    Negative strides are supported (since 1.9.2) for counting downwards.
    Some examples:
    5 -> ["1","2","3","4","5"]
    5-8 -> ["5", "6", "7", "8"]
    2-10/2 -> ["2", "4", "6", "8", "10"]
    4:host%02d -> ["host01","host02","host03","host04"]
    The standard Ansible key-value form is accepted as well. For example:
    start=5 end=11 stride=2 format=0x%02x -> ["0x05","0x07","0x09","0x0b"]
    This format takes an alternate form of "end" called "count", which counts
    some number from the starting value. For example:
    count=5 -> ["1", "2", "3", "4", "5"]
    start=0x0f00 count=4 format=%04x -> ["0f00", "0f01", "0f02", "0f03"]
    start=0 count=5 stride=2 -> ["0", "2", "4", "6", "8"]
    start=1 count=5 stride=2 -> ["1", "3", "5", "7", "9"]
    The count option is mostly useful for avoiding off-by-one errors and errors
    calculating the number of entries in a sequence when a stride is specified.
    """
    def reset(self):
        """Reset parse state to sensible defaults before handling a term."""
        self.start = 1
        self.count = None
        self.end = None
        self.stride = 1
        self.format = "%d"
    def parse_kv_args(self, args):
        """Parse key=value style arguments; raises AnsibleError on bad input."""
        for arg in ["start", "end", "count", "stride"]:
            try:
                arg_raw = args.pop(arg, None)
                if arg_raw is None:
                    continue
                # base 0: accept decimal, hex (0x..) and octal (0..) literals
                arg_cooked = int(arg_raw, 0)
                setattr(self, arg, arg_cooked)
            except ValueError:
                raise AnsibleError(
                    "can't parse arg %s=%r as integer"
                    % (arg, arg_raw)
                )
        if 'format' in args:
            self.format = args.pop("format")
        # Anything left over is an unknown key — fail loudly.
        if args:
            raise AnsibleError(
                "unrecognized arguments to with_sequence: %r"
                % args.keys()
            )
    def parse_simple_args(self, term):
        """Parse the shortcut form; return True if *term* matched, else False."""
        match = SHORTCUT.match(term)
        if not match:
            return False
        # Groups: (outer, start, end, outer, stride, outer, format)
        _, start, end, _, stride, _, format = match.groups()
        if start is not None:
            try:
                start = int(start, 0)
            except ValueError:
                raise AnsibleError("can't parse start=%s as integer" % start)
        if end is not None:
            try:
                end = int(end, 0)
            except ValueError:
                raise AnsibleError("can't parse end=%s as integer" % end)
        if stride is not None:
            try:
                stride = int(stride, 0)
            except ValueError:
                raise AnsibleError("can't parse stride=%s as integer" % stride)
        # Only overwrite the defaults for the pieces actually present.
        if start is not None:
            self.start = start
        if end is not None:
            self.end = end
        if stride is not None:
            self.stride = stride
        if format is not None:
            self.format = format
        return True
    def sanity_check(self):
        """Validate the parsed options and normalize count into end."""
        if self.count is None and self.end is None:
            raise AnsibleError("must specify count or end in with_sequence")
        elif self.count is not None and self.end is not None:
            raise AnsibleError("can't specify both count and end in with_sequence")
        elif self.count is not None:
            # convert count to end
            if self.count != 0:
                self.end = self.start + self.count * self.stride - 1
            else:
                # count=0 yields an empty sequence (stride 0 is skipped in run())
                self.start = 0
                self.end = 0
                self.stride = 0
            # count has been folded into end; drop it so later code can't
            # accidentally read stale state.
            del self.count
        if self.stride > 0 and self.end < self.start:
            raise AnsibleError("to count backwards make stride negative")
        if self.stride < 0 and self.end > self.start:
            raise AnsibleError("to count forward don't make stride negative")
        # Exactly one printf placeholder is required.
        if self.format.count('%') != 1:
            raise AnsibleError("bad formatting string: %s" % self.format)
    def generate_sequence(self):
        """Yield each number of the sequence formatted with self.format."""
        # range() excludes the endpoint, so nudge it by the stride's sign to
        # make end inclusive in both directions.
        if self.stride >= 0:
            adjust = 1
        else:
            adjust = -1
        numbers = xrange(self.start, self.end + adjust, self.stride)
        for i in numbers:
            try:
                formatted = self.format % i
                yield formatted
            except (ValueError, TypeError):
                raise AnsibleError(
                    "problem formatting %r with %r" % (i, self.format)
                )
    def run(self, terms, variables, **kwargs):
        """Entry point: expand every term into its generated sequence."""
        results = []
        for term in terms:
            try:
                self.reset() # clear out things for this iteration
                try:
                    # Try the shortcut syntax first, fall back to key=value.
                    if not self.parse_simple_args(term):
                        self.parse_kv_args(parse_kv(term))
                except AnsibleError:
                    raise
                except Exception as e:
                    raise AnsibleError("unknown error parsing with_sequence arguments: %r. Error was: %s" % (term, e))
                self.sanity_check()
                # stride == 0 means the count=0 empty sequence — emit nothing.
                if self.stride != 0:
                    results.extend(self.generate_sequence())
            except AnsibleError:
                raise
            except Exception as e:
                raise AnsibleError(
                    "unknown error generating sequence: %s" % e
                )
        return results
|
unknown
|
codeparrot/codeparrot-clean
| ||
#!/usr/bin/env python
# (Python >= 2.6 including >= 3)
# Using regexps for parsing C++ is, of course, entirely a hack.
# Configuration that you might want to change is in this file,
# below the help message and above the trickier code.
import re, os, sys, subprocess, glob
# Validate the single required sub-command; print usage and exit otherwise.
if len(sys.argv) < 2 or sys.argv[1] in set(['-h','-?','-help','--help']) \
        or sys.argv[1] not in set(['instrument', 'restore']):
    print("""
Usage: %s [instrument|restore]
'instrument' adds/updates instrumentation; 'restore' deletes it.
Instrumentation is just additions to Lasercake code that send
debug message info to stdout upon entering most Lasercake functions.
It can be useful combined with -Q or such to help debug whether two
different compilations or runs of Lasercake that *should* be doing
the exact same thing in fact *are* doing the exact same thing.
If there's a problem with buggy optimizers, this might be unhelpful,
because the instrumentation will quite likely change what the compiler's
optimizer does.
""" % sys.argv[0])
    sys.exit(0)
# Restore always runs first; 'instrument' additionally re-adds fresh
# instrumentation after the old markers are stripped.
do_restore = True
do_instrument = (sys.argv[1] == 'instrument')
# Configuration that you might want to change:

# Instrument functions in these files:
# (Note: files not currently instrumented might not easily work
# to instrument, because of regexp hacks doing the wrong thing
# or argument types that can't easily be serialized. To fix the
# latter, add functions for your types similar to
# std::ostream& operator<<(std::ostream&, type)
# , or put the troublesome argument's type name [in the form it's
# used textually] in excluded_re below, or add an overload in
# debug_print_deterministically.hpp.)
filenames = glob.glob('*.cpp')

# Any function argument type strings (as written) that contain
# anything matching this regexp are omitted (not attempted to
# be written to output). This can be useful for large or
# impossible-to-output data (though various tricky things *can*
# be done for certain data; see debug_print_deterministically.hpp).
excluded_re = re.compile(r"""
\b(?:
world|frame_output_t|gl_all_data|gl_collection|gl_call_data
|state_t|tile_physics_state_t|volume_calipers|active_fluids_t
|water_groups_by_location_t|persistent_water_group_info
|groupable_water_volume_calipers_t|persistent_water_groups_t
|objects_map|object_shapes_t
)\b
|\bQ[A-Z]|\bLasercake[A-Z]
|function|_map\b|_set\b|\bset\b|\bmap\b
|collision_detector|priority_queue|borrowed_bitset
""", re.VERBOSE)

# Avoid these specific functions for speed reasons.
# (Alternately, we could put e.g. /*noinstrument*/ immediately before
# the function's begin curly brace and that would also prevent this code
# from instrumenting that function.)
function_names_to_skip_re = re.compile(r"""
\b(
in_old_box|compute_tile_color|collidable_tile|prepare_tile
|cast_vector3_to_float|cast_vector3_to_double|look_here
|tile_manhattan_distance_to_tile_bounding_box
|do_tile
)\b
""", re.VERBOSE)

# The code below is closer to black magic, though it's somewhat commented.
# If you can tweak the regexps or output, for your gain, without breaking
# anything that currently works (instrument and recover on all the files
# in the default value of 'filenames', and as much deterministicness of
# Lasercake output as we can get), then go ahead!

# Matches a function definition head up to and including its opening brace.
find_functions_re = re.compile(
    r"""\b(\w+) #function name
    \( #begin parenthesis
    ([^()]*) #arguments
    \) #end parenthesis
    \s*(?:const\s*)? #filler matter
    (?::[^;]*?[)\s])? # constructor filler matter, ending with ) or
                      # space right before the function begin curly
                      # brace.
                      # Semicolons are excluded as a hack to keep the
                      # ?: operator from occasionally looking like a
                      # constructor definition e.g. non-function
                      # result_type(*i) in:
                      # i ? result_type(*i) : result_type();
    { #begin function body
    """,
    re.VERBOSE | re.DOTALL)

# Read every target file once up front; edits are computed in memory and
# written back at the end only when the content actually changed.
filecontents_initial = {}
for filename in filenames:
    with open(filename, 'r') as f:
        filecontents_initial[filename] = f.read()

# Splits one C++ parameter declaration into (type, name, default value).
argname_re = re.compile(r"""
    (.*?[^[<(]) #type
    (\b\w+) #arg name
    (\s*=[^,]+)? #default argument value
    , #comma between arguments (or for hack at end)
    """,
    re.VERBOSE | re.DOTALL)
def get_arg_names(argstr):
    """Extract parameter names from a C++ argument-list string.

    Parameters whose written type matches ``excluded_re`` are skipped.
    A trailing comma is appended so argname_re can match the last argument.
    """
    names = []
    for match in re.finditer(argname_re, argstr + ','):
        type_text = match.group(1)
        if not re.search(excluded_re, type_text):
            names.append(match.group(2))
    return names
# Give up on parameter packs / vararg functions rather than try hard to
# implement sensible things for uncommon functions.
functions_to_give_up_on_re = re.compile(r"\.\.\.")

# These deal strangely with newlines/tabs/etc currently:
escape_string_for_C_re = re.compile(r"""(["\\])""")
collapse_whitespace_re = re.compile(r"""\s+""")

def escape_string_for_C(string):
    """Collapse whitespace runs to single spaces, then backslash-escape
    double quotes and backslashes so the text is safe inside a C literal."""
    collapsed = re.sub(collapse_whitespace_re, r' ', string)
    return re.sub(escape_string_for_C_re, r'\\\1', collapsed)

def make_string_for_C(string):
    """Return *string* escaped and wrapped in double quotes as a C literal."""
    return '"%s"' % escape_string_for_C(string)

# Strips the trailing whitespace-plus-open-brace from a matched function head.
de_curly_re = re.compile(r'''\s+{$''')

# These are placed directly into a regex; luckily they
# don't contain any regex special characters:
begin_debug_instrument_str = " {DEBUG_INSTRUMENT_BEGIN;"
end_debug_instrument_str = "DEBUG_INSTRUMENT_END;}"
# The regex that includes those lucky strings above, used by the
# 'restore' pass to delete previously inserted instrumentation:
remove_instruments_re = re.compile(
    begin_debug_instrument_str + '.*?' + end_debug_instrument_str)
# TODO find a way to print 'this', only for member functions?
def augment_functions(filename, m):
    """re.sub callback: given a find_functions_re match, return the matched
    function head with instrumentation appended after the opening brace,
    or the match unchanged when the function should not be instrumented."""
    # Control-flow keywords also match the "name(args) {" pattern — skip them.
    if m.group(1) in set(['if', 'while', 'switch', 'for', 'do', 'catch',
                          'BOOST_SCOPE_EXIT']):
        return m.group(0)
    if re.search(functions_to_give_up_on_re, m.group(0)):
        return m.group(0)
    if re.search(function_names_to_skip_re, m.group(1)):
        return m.group(0)
    # This file is mostly time-critical functions:
    if filename == 'the_decomposition_of_the_world_into_blocks.cpp' \
            and m.group(1) != 'ensure_realization_impl':
        return m.group(0)
    fnname = m.group(1)
    argnames = get_arg_names(m.group(2))
    result = m.group(0)
    result += begin_debug_instrument_str
    result += (""" debug_print_ostream() << "%s("; """ % (escape_string_for_C(fnname)))
    # Alternative name stamps kept for reference (see the determinism note below):
    #result += """ {debug_print_ostream() << __func__ << '('; """
    #result += """ {debug_print_ostream() << __FILE__ << ':' << __LINE__ << ':' << __PRETTY_FUNCTION__ << '('; """
    #result += """ {debug_print_ostream() << __PRETTY_FUNCTION__ << " ("; """
    # Emit each (non-excluded) argument, comma-separated.
    first = True
    for argname in argnames:
        if first:
            first = False
        else:
            result += """debug_print_ostream() << ", "; """
        result += """debug_print_val_deterministically("""+argname+"); "
    fnfullish = re.sub(de_curly_re, '', m.group(0))
    #result += r"""debug_print_ostream() << ")\n";}"""
    #result += r"""debug_print_ostream() << "): " << __PRETTY_FUNCTION__ << '\n';}"""
    # There was a difference between 'long int' and 'long long int' meaning int64_t
    # on two different platforms, so avoid __PRETTY_FUNCTION__.
    # Hopefully __LINE__ is consistent; it'd be better to compute it here.
    # Stringize it at preprocessor-time, anyway, to make it faster at runtime if possible.
    result += r"""debug_print_ostream() << "): %s:" BOOST_PP_STRINGIZE(__LINE__) ": %s\n";""" % \
        (escape_string_for_C(filename),
         escape_string_for_C(fnfullish))
    result += end_debug_instrument_str
    return result
# Pipeline: strip old instrumentation first (restore), then optionally
# re-instrument the cleaned text; only changed files are rewritten.
filecontents_clean = {}
filecontents_instrumented = {}
filecontents_final = {}
for filename in filenames:
    cont = filecontents_initial[filename]
    if do_restore:
        cont = filecontents_clean[filename] = re.sub(remove_instruments_re, '', cont)
    if do_instrument:
        cont = filecontents_instrumented[filename] = re.sub(
            find_functions_re,
            lambda m: augment_functions(filename, m),
            cont)
    filecontents_final[filename] = cont
# Write back only files whose content actually changed (avoids touching
# mtimes and triggering needless rebuilds).
for filename in filenames:
    if filecontents_final[filename] != filecontents_initial[filename]:
        with open(filename, 'w') as f:
            f.write(filecontents_final[filename])
# Toggle the DEBUG_PRINT_DETERMINISTICALLY preprocessor guard in config.hpp
# so the inserted instrumentation actually compiles in/out.
ch = 'config.hpp'
with open(ch, 'r') as f:
    config_contents_initial = f.read()
cont = config_contents_initial
if do_restore:
    cont = re.sub('#if 1\|\|DEBUG_PRINT_DETERMINISTICALLY', '#if DEBUG_PRINT_DETERMINISTICALLY', cont)
if do_instrument:
    cont = re.sub('#if DEBUG_PRINT_DETERMINISTICALLY', '#if 1||DEBUG_PRINT_DETERMINISTICALLY', cont)
config_contents_final = cont
if config_contents_final != config_contents_initial:
    with open(ch, 'w') as f:
        f.write(config_contents_final)
|
unknown
|
codeparrot/codeparrot-clean
| ||
# -*- coding: utf-8 -*-
"""
Django settings for foundation project.
For more information on this file, see
https://docs.djangoproject.com/en/dev/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/dev/ref/settings/
"""
from __future__ import absolute_import, unicode_literals
import environ
# Paths are resolved relative to this settings file via django-environ.
ROOT_DIR = environ.Path(__file__) - 3 # (/a/b/myfile.py - 3 = /)
APPS_DIR = ROOT_DIR.path('foundation')
env = environ.Env()
# APP CONFIGURATION
# ------------------------------------------------------------------------------
DJANGO_APPS = (
    # Default Django apps:
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.sites',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'django.contrib.flatpages',
    # Useful template tags:
    'django.contrib.humanize',
    # Admin
    'django.contrib.admin',
)
THIRD_PARTY_APPS = (
    'dal',
    'dal_select2',
    'crispy_forms',  # Form layouts
    'allauth',  # registration
    'allauth.account',  # registration
    'allauth.socialaccount',  # registration
    'django_tables2',
    'django_states',
    'teryt_tree',
    'bootstrap_pagination',
    'taggit',
    'djmail',
    'ckeditor',
    'django_mailbox',
    'django_basic_tinymce_flatpages',
)
# Apps specific for this project go here.
LOCAL_APPS = (
    'foundation.main',
    'foundation.users',
    'foundation.offices',
    'foundation.offices.emails',
    'foundation.teryt',
    'foundation.correspondence',
    'foundation.cases',
    'foundation.letters',
    'foundation.press',
)
# See: https://docs.djangoproject.com/en/dev/ref/settings/#installed-apps
# NOTE(review): local apps are listed first so their templates/static files
# shadow third-party and django defaults — presumably intentional; confirm.
INSTALLED_APPS = LOCAL_APPS + THIRD_PARTY_APPS + DJANGO_APPS
# MIDDLEWARE CONFIGURATION
# ------------------------------------------------------------------------------
MIDDLEWARE_CLASSES = (
    # Make sure djangosecure.middleware.SecurityMiddleware is listed first
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
# MIGRATIONS CONFIGURATION
# ------------------------------------------------------------------------------
MIGRATION_MODULES = {
    'sites': 'foundation.contrib.sites.migrations'
}
# DEBUG
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#debug
DEBUG = env.bool("DJANGO_DEBUG", False)
# FIXTURE CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#std:setting-FIXTURE_DIRS
FIXTURE_DIRS = (
    str(APPS_DIR.path('fixtures')),
)
# EMAIL CONFIGURATION
# ------------------------------------------------------------------------------
# NOTE(review): this env-configurable backend is unconditionally overridden
# by the djmail EMAIL_BACKEND assignment near the bottom of this file.
EMAIL_BACKEND = env('DJANGO_EMAIL_BACKEND', default='django.core.mail.backends.smtp.EmailBackend')
# MANAGER CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#admins
ADMINS = (
    ("""Adam Dobrawy""", 'naczelnik@jawnosc.tk'),
)
# See: https://docs.djangoproject.com/en/dev/ref/settings/#managers
MANAGERS = ADMINS
# DATABASE CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#databases
DATABASES = {
    # Raises ImproperlyConfigured exception if DATABASE_URL not in os.environ
    'default': env.db("DATABASE_URL", default="mysql:///foundation"),
}
# Wrap every request in a transaction.
DATABASES['default']['ATOMIC_REQUESTS'] = True
# GENERAL CONFIGURATION
# ------------------------------------------------------------------------------
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# In a Windows environment this must be set to your system time zone.
TIME_ZONE = 'Europe/Warsaw'
# See: https://docs.djangoproject.com/en/dev/ref/settings/#language-code
LANGUAGE_CODE = 'pl-pl'
# See: https://docs.djangoproject.com/en/dev/ref/settings/#site-id
SITE_ID = 1
# See: https://docs.djangoproject.com/en/dev/ref/settings/#use-i18n
USE_I18N = True
# See: https://docs.djangoproject.com/en/dev/ref/settings/#use-l10n
USE_L10N = True
# See: https://docs.djangoproject.com/en/dev/ref/settings/#use-tz
USE_TZ = True
# TEMPLATE CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#templates
TEMPLATES = [
    {
        # See: https://docs.djangoproject.com/en/dev/ref/settings/#std:setting-TEMPLATES-BACKEND
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        # See: https://docs.djangoproject.com/en/dev/ref/settings/#template-dirs
        'DIRS': [
            str(APPS_DIR.path('templates')),
        ],
        'OPTIONS': {
            # See: https://docs.djangoproject.com/en/dev/ref/settings/#template-debug
            'debug': DEBUG,
            # See: https://docs.djangoproject.com/en/dev/ref/settings/#template-loaders
            # https://docs.djangoproject.com/en/dev/ref/templates/api/#loader-types
            'loaders': [
                'django.template.loaders.filesystem.Loader',
                'django.template.loaders.app_directories.Loader',
            ],
            # See: https://docs.djangoproject.com/en/dev/ref/settings/#template-context-processors
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.template.context_processors.i18n',
                'django.template.context_processors.media',
                'django.template.context_processors.static',
                'django.template.context_processors.tz',
                'django.contrib.messages.context_processors.messages',
                # Your stuff: custom template context processors go here
            ],
        },
    },
]
# See: http://django-crispy-forms.readthedocs.org/en/latest/install.html#template-packs
CRISPY_TEMPLATE_PACK = 'bootstrap3'
# STATIC FILE CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#static-root
STATIC_ROOT = str(ROOT_DIR('staticfiles'))
# See: https://docs.djangoproject.com/en/dev/ref/settings/#static-url
STATIC_URL = '/static/'
# See: https://docs.djangoproject.com/en/dev/ref/contrib/staticfiles/#std:setting-STATICFILES_DIRS
STATICFILES_DIRS = (
    str(APPS_DIR.path('static')),
)
# See: https://docs.djangoproject.com/en/dev/ref/contrib/staticfiles/#staticfiles-finders
STATICFILES_FINDERS = (
    'django.contrib.staticfiles.finders.FileSystemFinder',
    'django.contrib.staticfiles.finders.AppDirectoriesFinder',
)
# MEDIA CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#media-root
MEDIA_ROOT = str(APPS_DIR('media'))
# See: https://docs.djangoproject.com/en/dev/ref/settings/#media-url
MEDIA_URL = '/media/'
# URL Configuration
# ------------------------------------------------------------------------------
ROOT_URLCONF = 'config.urls'
# See: https://docs.djangoproject.com/en/dev/ref/settings/#wsgi-application
WSGI_APPLICATION = 'config.wsgi.application'
# AUTHENTICATION CONFIGURATION
# ------------------------------------------------------------------------------
AUTHENTICATION_BACKENDS = (
    'django.contrib.auth.backends.ModelBackend',
    'allauth.account.auth_backends.AuthenticationBackend',
)
# Some really nice defaults
ACCOUNT_AUTHENTICATION_METHOD = 'username'
ACCOUNT_EMAIL_REQUIRED = True
ACCOUNT_EMAIL_VERIFICATION = 'mandatory'
# Custom user app defaults
# Select the correct user model
AUTH_USER_MODEL = 'users.User'
LOGIN_REDIRECT_URL = 'users:redirect'
LOGIN_URL = 'account_login'
# SLUGLIFIER
AUTOSLUG_SLUGIFY_FUNCTION = 'atom.ext.slugify.slugifier.ascii_slugify'
# LOGGING CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#logging
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
    'version': 1,
    'disable_existing_loggers': False,
    'filters': {
        'require_debug_false': {
            '()': 'django.utils.log.RequireDebugFalse'
        }
    },
    'handlers': {
        'mail_admins': {
            'level': 'ERROR',
            'filters': ['require_debug_false'],
            'class': 'django.utils.log.AdminEmailHandler'
        }
    },
    'loggers': {
        'django.request': {
            'handlers': ['mail_admins'],
            'level': 'ERROR',
            'propagate': True,
        },
    }
}
TAGGIT_CASE_INSENSITIVE = True
# Your common stuff: Below this line define 3rd party library settings
# djmail wraps the real SMTP backend configured below (overrides the
# env-based EMAIL_BACKEND defined earlier in this file).
EMAIL_BACKEND = "djmail.backends.default.EmailBackend"
DJMAIL_REAL_BACKEND = "django.core.mail.backends.smtp.EmailBackend"
MAILBOX_RECEIVING_PROTOTYPE = 'sprawa-{id}@pytamy.pilnujemy.info'
BLEACH_DEFAULT_WIDGET = 'ckeditor.widgets.CKEditorWidget'
BLEACH_ALLOWED_TAGS = [
    # bleach default
    'a',
    'abbr',
    'acronym',
    'b',
    'blockquote',
    'code',
    'em',
    'i',
    'li',
    'ol',
    'strong',
    'ul',
    # extra
    'br',
    'p',
    'u',
    'div',
]
CKEDITOR_CONFIGS = {
    'default': {
        'toolbar': 'Custom',
        'toolbar_Custom': [
            ['Bold', 'Italic', 'Underline'],
            ['NumberedList', 'BulletedList', '-', 'Blockquote'],
            ['Link', 'Unlink'],
            ['RemoveFormat', 'Source']
        ]
    }
}
DJANGO_MAILBOX_STORE_ORIGINAL_MESSAGE = True
FLATPAGE_WIDGET = 'ckeditor.widgets.CKEditorWidget'
FLATPAGE_KWARGS = {}
|
unknown
|
codeparrot/codeparrot-clean
| ||
"""
Implementation of the XDG Menu Specification Version 1.0.draft-1
http://standards.freedesktop.org/menu-spec/
"""
from __future__ import generators
import locale, os, xml.dom.minidom
from xdg.BaseDirectory import *
from xdg.DesktopEntry import *
from xdg.Exceptions import *
import xdg.Locale
import xdg.Config
ELEMENT_NODE = xml.dom.Node.ELEMENT_NODE
# for python <= 2.3
# reversed() only became a builtin in Python 2.4; provide a slice-based
# fallback (returns a copy, not an iterator) for older interpreters.
try:
    reversed = reversed
except NameError:
    def reversed(x):
        return x[::-1]
class Menu:
    """One menu in the tree built from a .menu file.

    Public attributes (Name, Directory, Entries, Show, ...) describe the
    visible menu; the remaining attributes are parser bookkeeping.
    """
    def __init__(self):
        # Public stuff
        self.Name = ""
        self.Directory = None
        self.Entries = []
        self.Doc = ""
        self.Filename = ""
        self.Depth = 0
        self.Parent = None
        self.NotInXml = False
        # Can be one of Deleted/NoDisplay/Hidden/Empty/NotShowIn or True
        self.Show = True
        self.Visible = 0
        # Private stuff, only needed for parsing
        self.AppDirs = []
        self.DefaultLayout = None
        self.Deleted = "notset"
        self.Directories = []
        self.DirectoryDirs = []
        self.Layout = None
        self.MenuEntries = []
        self.Moves = []
        self.OnlyUnallocated = "notset"
        self.Rules = []
        self.Submenus = []
    def __str__(self):
        return self.Name
    def __add__(self, other):
        """Merge *other* into this menu (used when .menu files are merged)."""
        for dir in other.AppDirs:
            self.AppDirs.append(dir)
        for dir in other.DirectoryDirs:
            self.DirectoryDirs.append(dir)
        for directory in other.Directories:
            self.Directories.append(directory)
        if other.Deleted != "notset":
            self.Deleted = other.Deleted
        if other.OnlyUnallocated != "notset":
            self.OnlyUnallocated = other.OnlyUnallocated
        if other.Layout:
            self.Layout = other.Layout
        if other.DefaultLayout:
            self.DefaultLayout = other.DefaultLayout
        for rule in other.Rules:
            self.Rules.append(rule)
        for move in other.Moves:
            self.Moves.append(move)
        for submenu in other.Submenus:
            self.addSubmenu(submenu)
        return self
    # FIXME: Performance: cache getName()
    def __cmp__(self, other):
        # Python 2 ordering hook: sort menus by localized display name.
        return locale.strcoll(self.getName(), other.getName())
    def __eq__(self, other):
        # Menus compare equal to anything whose str() matches their Name.
        if self.Name == str(other):
            return True
        else:
            return False
    """ PUBLIC STUFF """
    def getEntries(self, hidden=False):
        """Iterate over the sorted entries; skip hidden ones unless asked."""
        for entry in self.Entries:
            if hidden == True:
                yield entry
            elif entry.Show == True:
                yield entry
    # FIXME: Add searchEntry/searchMenu function
    # search for name/comment/genericname/desktopfileid
    # return multiple items
    def getMenuEntry(self, desktopfileid, deep = False):
        """Return the MenuEntry with *desktopfileid*, searching submenus
        recursively when *deep* is true; None when not found.

        Bug fix: the recursive lookup used to discard its return value,
        so deep searches always returned None.
        """
        for menuentry in self.MenuEntries:
            if menuentry.DesktopFileID == desktopfileid:
                return menuentry
        if deep == True:
            for submenu in self.Submenus:
                found = submenu.getMenuEntry(desktopfileid, deep)
                if found:
                    return found
    def getMenu(self, path):
        """Return the submenu at slash-separated *path*, or None."""
        array = path.split("/", 1)
        for submenu in self.Submenus:
            if submenu.Name == array[0]:
                if len(array) > 1:
                    return submenu.getMenu(array[1])
                else:
                    return submenu
    def getPath(self, org=False, toplevel=False):
        """Return this menu's path; raw Names when *org* is true, localized
        names otherwise; include the root menu only when *toplevel*."""
        parent = self
        names = []
        while 1:
            if org:
                names.append(parent.Name)
            else:
                names.append(parent.getName())
            if parent.Depth > 0:
                parent = parent.Parent
            else:
                break
        names.reverse()
        path = ""
        if toplevel == False:
            names.pop(0)
        for name in names:
            path = os.path.join(path, name)
        return path
    def getName(self):
        """Localized name from the .directory entry, falling back to Name."""
        try:
            return self.Directory.DesktopEntry.getName()
        except AttributeError:
            return self.Name
    def getGenericName(self):
        try:
            return self.Directory.DesktopEntry.getGenericName()
        except AttributeError:
            return ""
    def getComment(self):
        try:
            return self.Directory.DesktopEntry.getComment()
        except AttributeError:
            return ""
    def getIcon(self):
        try:
            return self.Directory.DesktopEntry.getIcon()
        except AttributeError:
            return ""
    """ PRIVATE STUFF """
    def addSubmenu(self, newmenu):
        """Attach *newmenu* as a child, merging into an existing child of
        the same name instead of creating a duplicate."""
        for submenu in self.Submenus:
            if submenu == newmenu:
                submenu += newmenu
                break
        else:
            self.Submenus.append(newmenu)
            newmenu.Parent = self
            newmenu.Depth = self.Depth + 1
class Move:
    """A single <Move> operation: relocate submenu path Old to path New."""
    def __init__(self, node=None):
        if node:
            self.parseNode(node)
        else:
            self.Old = ""
            self.New = ""
    def __cmp__(self, other):
        # Python 2 ordering hook: order move operations by source path.
        return cmp(self.Old, other.Old)
    def parseNode(self, node):
        # Both <Old> and <New> children must carry non-empty text.
        for element in node.childNodes:
            if element.nodeType != ELEMENT_NODE:
                continue
            if element.tagName == "Old":
                try:
                    self.parseOld(element.childNodes[0].nodeValue)
                except IndexError:
                    raise ValidationError('Old cannot be empty', '??')
            elif element.tagName == "New":
                try:
                    self.parseNew(element.childNodes[0].nodeValue)
                except IndexError:
                    raise ValidationError('New cannot be empty', '??')
    def parseOld(self, value):
        self.Old = value
    def parseNew(self, value):
        self.New = value
class Layout:
    """A <Layout>/<DefaultLayout> element: the ordering rules for a menu."""
    def __init__(self, node=None):
        self.order = []
        # Start from the specification defaults; an XML node may override.
        self.show_empty = "false"
        self.inline = "false"
        self.inline_limit = 4
        self.inline_header = "true"
        self.inline_alias = "false"
        if node:
            self.show_empty = node.getAttribute("show_empty") or "false"
            self.inline = node.getAttribute("inline") or "false"
            self.inline_limit = int(node.getAttribute("inline_limit") or 4)
            self.inline_header = node.getAttribute("inline_header") or "true"
            self.inline_alias = node.getAttribute("inline_alias") or "false"
            self.parseNode(node)
        else:
            # Implicit default layout: merge submenus first, then files.
            self.order.append(["Merge", "menus"])
            self.order.append(["Merge", "files"])
    def parseNode(self, node):
        # Translate child elements into ordering directives.
        for element in node.childNodes:
            if element.nodeType != ELEMENT_NODE:
                continue
            if element.tagName == "Menuname":
                try:
                    self.parseMenuname(
                        element.childNodes[0].nodeValue,
                        element.getAttribute("show_empty") or "false",
                        element.getAttribute("inline") or "false",
                        element.getAttribute("inline_limit") or 4,
                        element.getAttribute("inline_header") or "true",
                        element.getAttribute("inline_alias") or "false" )
                except IndexError:
                    raise ValidationError('Menuname cannot be empty', "")
            elif element.tagName == "Separator":
                self.parseSeparator()
            elif element.tagName == "Filename":
                try:
                    self.parseFilename(element.childNodes[0].nodeValue)
                except IndexError:
                    raise ValidationError('Filename cannot be empty', "")
            elif element.tagName == "Merge":
                self.parseMerge(element.getAttribute("type") or "all")
    def parseMenuname(self, value, empty="false", inline="false", inline_limit=4, inline_header="true", inline_alias="false"):
        # inline_limit may arrive as a string attribute; normalize to int.
        directive = ["Menuname", value, empty, inline, int(inline_limit),
                     inline_header, inline_alias]
        self.order.append(directive)
    def parseSeparator(self):
        self.order.append(["Separator"])
    def parseFilename(self, value):
        self.order.append(["Filename", value])
    def parseMerge(self, type="all"):
        self.order.append(["Merge", type])
class Rule:
    """An <Include>/<Exclude> rule compiled into a Python expression.

    parseNode() builds self.Rule, a boolean expression over a local named
    ``menuentry``; compile() exec's a generated ``do()`` function into the
    instance __dict__, which applies the rule to a list of menu entries.
    """
    def __init__(self, type, node=None):
        # Type is Include or Exclude
        self.Type = type
        # Rule is a python expression
        self.Rule = ""
        # Private attributes, only needed for parsing
        self.Depth = 0
        self.Expr = [ "or" ]
        self.New = True
        # Begin parsing
        if node:
            self.parseNode(node)
            self.compile()
    def __str__(self):
        return self.Rule
    def compile(self):
        # Python 2 exec-statement form ("exec code in namespace"); the
        # generated do() lands in self.__dict__ and is later called as
        # rule.do(entries, rule.Type, run).  The embedded source must stay
        # at column 0 — it is compiled as module-level code.
        exec("""
def do(menuentries, type, run):
    for menuentry in menuentries:
        if run == 2 and ( menuentry.MatchedInclude == True \
        or menuentry.Allocated == True ):
            continue
        elif %s:
            if type == "Include":
                menuentry.Add = True
                menuentry.MatchedInclude = True
            else:
                menuentry.Add = False
    return menuentries
""" % self.Rule) in self.__dict__
    def parseNode(self, node):
        # Dispatch over child elements, appending terms to self.Rule.
        for child in node.childNodes:
            if child.nodeType == ELEMENT_NODE:
                if child.tagName == 'Filename':
                    try:
                        self.parseFilename(child.childNodes[0].nodeValue)
                    except IndexError:
                        raise ValidationError('Filename cannot be empty', "???")
                elif child.tagName == 'Category':
                    try:
                        self.parseCategory(child.childNodes[0].nodeValue)
                    except IndexError:
                        raise ValidationError('Category cannot be empty', "???")
                elif child.tagName == 'All':
                    self.parseAll()
                elif child.tagName == 'And':
                    self.parseAnd(child)
                elif child.tagName == 'Or':
                    self.parseOr(child)
                elif child.tagName == 'Not':
                    self.parseNot(child)
    def parseNew(self, set=True):
        # Join consecutive terms with the boolean operator of the current
        # nesting level ("and"/"or"); *set* marks whether a term follows.
        if not self.New:
            self.Rule += " " + self.Expr[self.Depth] + " "
        if not set:
            self.New = True
        elif set:
            self.New = False
    def parseFilename(self, value):
        self.parseNew()
        # Escape backslashes and single quotes so the id is safe in '...'.
        self.Rule += "menuentry.DesktopFileID == '%s'" % value.strip().replace("\\", r"\\").replace("'", r"\'")
    def parseCategory(self, value):
        self.parseNew()
        # NOTE(review): the category value is not quote-escaped like the
        # filename above — confirm category names can't contain quotes.
        self.Rule += "'%s' in menuentry.Categories" % value.strip()
    def parseAll(self):
        self.parseNew()
        self.Rule += "True"
    def parseAnd(self, node):
        # Nested group whose terms are joined with "and".
        self.parseNew(False)
        self.Rule += "("
        self.Depth += 1
        self.Expr.append("and")
        self.parseNode(node)
        self.Depth -= 1
        self.Expr.pop()
        self.Rule += ")"
    def parseOr(self, node):
        # Nested group whose terms are joined with "or".
        self.parseNew(False)
        self.Rule += "("
        self.Depth += 1
        self.Expr.append("or")
        self.parseNode(node)
        self.Depth -= 1
        self.Expr.pop()
        self.Rule += ")"
    def parseNot(self, node):
        # Negated group; inner terms are joined with "or" per the spec.
        self.parseNew(False)
        self.Rule += "not ("
        self.Depth += 1
        self.Expr.append("or")
        self.parseNode(node)
        self.Depth -= 1
        self.Expr.pop()
        self.Rule += ")"
class MenuEntry:
    """Wrapper around a DesktopEntry as used inside a menu.

    Tracks where the entry was loaded from (dir/prefix), whether it is a
    user or system file, and the allocation state used while generating
    the menu tree.
    """
    def __init__(self, filename, dir="", prefix=""):
        # Create entry
        self.DesktopEntry = DesktopEntry(os.path.join(dir,filename))
        self.setAttributes(filename, dir, prefix)
        # Can be one of Deleted/Hidden/Empty/NotShowIn/NoExec or True
        self.Show = True
        # Semi-Private
        self.Original = None   # system-level MenuEntry shadowed by this one
        self.Parents = []      # menus this entry has been allocated to
        # Private Stuff
        self.Allocated = False
        self.Add = False
        self.MatchedInclude = False
        # Caching
        self.Categories = self.DesktopEntry.getCategories()
    def save(self):
        # Only write back entries that were actually modified.
        if self.DesktopEntry.tainted == True:
            self.DesktopEntry.write()
    def getDir(self):
        # Directory part of the entry's path (strips the relative filename).
        return self.DesktopEntry.filename.replace(self.Filename, '')
    def getType(self):
        # Can be one of System/User/Both
        if xdg.Config.root_mode == False:
            if self.Original:
                return "Both"
            elif xdg_data_dirs[0] in self.DesktopEntry.filename:
                return "User"
            else:
                return "System"
        else:
            return "User"
    def setAttributes(self, filename, dir="", prefix=""):
        self.Filename = filename
        self.Prefix = prefix
        # Spec-style desktop-file id: path components joined with '-'.
        self.DesktopFileID = os.path.join(prefix,filename).replace("/", "-")
        if not os.path.isabs(self.DesktopEntry.filename):
            self.__setFilename()
    def updateAttributes(self):
        if self.getType() == "System":
            # Keep a handle on the system copy before redirecting this
            # entry to the user's writable location.
            self.Original = MenuEntry(self.Filename, self.getDir(), self.Prefix)
            self.__setFilename()
    def __setFilename(self):
        # Point the entry at the user-writable XDG data dir (or, in root
        # mode, the system-wide one).
        if xdg.Config.root_mode == False:
            path = xdg_data_dirs[0]
        else:
            path= xdg_data_dirs[1]
        if self.DesktopEntry.getType() == "Application":
            dir = os.path.join(path, "applications")
        else:
            dir = os.path.join(path, "desktop-directories")
        self.DesktopEntry.filename = os.path.join(dir, self.Filename)
    def __cmp__(self, other):
        # Python 2 ordering hook: sort entries by localized display name.
        return locale.strcoll(self.DesktopEntry.getName(), other.DesktopEntry.getName())
    def __eq__(self, other):
        # Entries compare equal to anything whose str() is their file id.
        if self.DesktopFileID == str(other):
            return True
        else:
            return False
    def __repr__(self):
        return self.DesktopFileID
class Separator:
    """Placeholder entry representing a separator line in a menu."""
    def __init__(self, parent):
        self.Parent = parent
        # Separators start visible; sort() may hide redundant ones.
        self.Show = True
class Header:
    """Inline header shown above a submenu's entries when they are inlined."""
    def __init__(self, name, generic_name, comment):
        self.Name = name
        self.GenericName = generic_name
        self.Comment = comment
    def __str__(self):
        return self.Name
# Module-level scratch space shared by parse() and its helper functions:
# holds the root menu, merge-loop detection list, legacy directory dirs
# and the MenuEntryCache for the parse in progress.
tmp = {}
def __getFileName(filename):
    """Search $XDG_CONFIG_DIRS/menus for *filename*; return the full path
    of the first match, or None when nothing is found."""
    search_dirs = xdg_config_dirs[:]
    # In root mode the user-level config dir is skipped.
    if xdg.Config.root_mode == True:
        search_dirs.pop(0)
    for base in search_dirs:
        candidate = os.path.join(base, "menus", filename)
        if os.path.isdir(base) and os.path.isfile(candidate):
            return candidate
def parse(filename=None):
    """Parse a .menu file and return the root Menu, fully generated and sorted.

    When *filename* is None (or relative), the file is searched for in the
    XDG config dirs; the default is $XDG_MENU_PREFIX + "applications.menu".
    Raises ParsingError for missing, non-.menu or malformed files.
    """
    # convert to absolute path
    if filename and not os.path.isabs(filename):
        filename = __getFileName(filename)
    # use default if no filename given
    if not filename:
        candidate = os.environ.get('XDG_MENU_PREFIX', '') + "applications.menu"
        filename = __getFileName(candidate)
        if not filename:
            raise ParsingError('File not found', "/etc/xdg/menus/%s" % candidate)
    # check if it is a .menu file
    if not os.path.splitext(filename)[1] == ".menu":
        raise ParsingError('Not a .menu file', filename)
    # create xml parser
    try:
        doc = xml.dom.minidom.parse(filename)
    except xml.parsers.expat.ExpatError:
        raise ParsingError('Not a valid .menu file', filename)
    # parse menufile — reset the module-level scratch state first
    tmp["Root"] = ""
    tmp["mergeFiles"] = []
    tmp["DirectoryDirs"] = []
    tmp["cache"] = MenuEntryCache()
    __parse(doc, filename, tmp["Root"])
    __parsemove(tmp["Root"])
    __postparse(tmp["Root"])
    tmp["Root"].Doc = doc
    tmp["Root"].Filename = filename
    # generate the menu (two allocation passes per the menu spec)
    __genmenuNotOnlyAllocated(tmp["Root"])
    __genmenuOnlyAllocated(tmp["Root"])
    # and finally sort
    sort(tmp["Root"])
    return tmp["Root"]
def __parse(node, filename, parent=None):
    """Dispatch over *node*'s child elements, filling in *parent* (a Menu).

    Implements the element vocabulary of the XDG menu specification:
    app/directory dirs, include/exclude rules, merges, moves, layouts
    and legacy directories.
    """
    for child in node.childNodes:
        if child.nodeType == ELEMENT_NODE:
            if child.tagName == 'Menu':
                __parseMenu(child, filename, parent)
            elif child.tagName == 'AppDir':
                try:
                    __parseAppDir(child.childNodes[0].nodeValue, filename, parent)
                except IndexError:
                    raise ValidationError('AppDir cannot be empty', filename)
            elif child.tagName == 'DefaultAppDirs':
                __parseDefaultAppDir(filename, parent)
            elif child.tagName == 'DirectoryDir':
                try:
                    __parseDirectoryDir(child.childNodes[0].nodeValue, filename, parent)
                except IndexError:
                    raise ValidationError('DirectoryDir cannot be empty', filename)
            elif child.tagName == 'DefaultDirectoryDirs':
                __parseDefaultDirectoryDir(filename, parent)
            elif child.tagName == 'Name' :
                try:
                    parent.Name = child.childNodes[0].nodeValue
                except IndexError:
                    raise ValidationError('Name cannot be empty', filename)
            elif child.tagName == 'Directory' :
                try:
                    parent.Directories.append(child.childNodes[0].nodeValue)
                except IndexError:
                    raise ValidationError('Directory cannot be empty', filename)
            elif child.tagName == 'OnlyUnallocated':
                parent.OnlyUnallocated = True
            elif child.tagName == 'NotOnlyUnallocated':
                parent.OnlyUnallocated = False
            elif child.tagName == 'Deleted':
                parent.Deleted = True
            elif child.tagName == 'NotDeleted':
                parent.Deleted = False
            elif child.tagName == 'Include' or child.tagName == 'Exclude':
                parent.Rules.append(Rule(child.tagName, child))
            elif child.tagName == 'MergeFile':
                try:
                    # type="parent" means "merge the same file from the next
                    # config dir"; the value is then just a placeholder.
                    if child.getAttribute("type") == "parent":
                        __parseMergeFile("applications.menu", child, filename, parent)
                    else:
                        __parseMergeFile(child.childNodes[0].nodeValue, child, filename, parent)
                except IndexError:
                    raise ValidationError('MergeFile cannot be empty', filename)
            elif child.tagName == 'MergeDir':
                try:
                    __parseMergeDir(child.childNodes[0].nodeValue, child, filename, parent)
                except IndexError:
                    raise ValidationError('MergeDir cannot be empty', filename)
            elif child.tagName == 'DefaultMergeDirs':
                __parseDefaultMergeDirs(child, filename, parent)
            elif child.tagName == 'Move':
                parent.Moves.append(Move(child))
            elif child.tagName == 'Layout':
                # NOTE(review): elements with <= 1 child node are skipped —
                # presumably empty layouts; confirm against the spec.
                if len(child.childNodes) > 1:
                    parent.Layout = Layout(child)
            elif child.tagName == 'DefaultLayout':
                if len(child.childNodes) > 1:
                    parent.DefaultLayout = Layout(child)
            elif child.tagName == 'LegacyDir':
                try:
                    __parseLegacyDir(child.childNodes[0].nodeValue, child.getAttribute("prefix"), filename, parent)
                except IndexError:
                    raise ValidationError('LegacyDir cannot be empty', filename)
            elif child.tagName == 'KDELegacyDirs':
                __parseKDELegacyDirs(filename, parent)
def __parsemove(menu):
    """Apply the menu's <Move> operations, depth-first.

    For each move whose source path exists, the destination chain of
    submenus is created on demand (intermediate menus are flagged
    NotInXml), the source menu is merged into the destination and then
    detached from its old parent.
    """
    for submenu in menu.Submenus:
        __parsemove(submenu)
    # parse move operations
    for move in menu.Moves:
        move_from_menu = menu.getMenu(move.Old)
        if move_from_menu:
            # NOTE(review): move_to_menu is computed but never used.
            move_to_menu = menu.getMenu(move.New)
            menus = move.New.split("/")
            oldparent = None
            while len(menus) > 0:
                if not oldparent:
                    oldparent = menu
                newmenu = oldparent.getMenu(menus[0])
                if not newmenu:
                    newmenu = Menu()
                    newmenu.Name = menus[0]
                    if len(menus) > 1:
                        newmenu.NotInXml = True
                    oldparent.addSubmenu(newmenu)
                oldparent = newmenu
                menus.pop(0)
            # newmenu now points at the final path component.
            newmenu += move_from_menu
            move_from_menu.Parent.Submenus.remove(move_from_menu)
def __postparse(menu):
    """Finalize a parsed menu tree (recursive bookkeeping pass).

    Resolves "notset" flags, inherits layouts and app/directory dirs from
    the parent, de-duplicates the dir lists, and picks each menu's
    .directory entry (a System duplicate of a User entry is kept as its
    Original).
    """
    # unallocated / deleted
    if menu.Deleted == "notset":
        menu.Deleted = False
    if menu.OnlyUnallocated == "notset":
        menu.OnlyUnallocated = False
    # Layout Tags — fill whichever of Layout/DefaultLayout is missing,
    # inheriting from the parent where possible.
    if not menu.Layout or not menu.DefaultLayout:
        if menu.DefaultLayout:
            menu.Layout = menu.DefaultLayout
        elif menu.Layout:
            if menu.Depth > 0:
                menu.DefaultLayout = menu.Parent.DefaultLayout
            else:
                menu.DefaultLayout = Layout()
        else:
            if menu.Depth > 0:
                menu.Layout = menu.Parent.DefaultLayout
                menu.DefaultLayout = menu.Parent.DefaultLayout
            else:
                menu.Layout = Layout()
                menu.DefaultLayout = Layout()
    # add parent's app/directory dirs
    if menu.Depth > 0:
        menu.AppDirs = menu.Parent.AppDirs + menu.AppDirs
        menu.DirectoryDirs = menu.Parent.DirectoryDirs + menu.DirectoryDirs
    # remove duplicates
    menu.Directories = __removeDuplicates(menu.Directories)
    menu.DirectoryDirs = __removeDuplicates(menu.DirectoryDirs)
    menu.AppDirs = __removeDuplicates(menu.AppDirs)
    # go recursive through all menus
    for submenu in menu.Submenus:
        __postparse(submenu)
    # reverse so handling is easier
    menu.Directories.reverse()
    menu.DirectoryDirs.reverse()
    menu.AppDirs.reverse()
    # get the valid .directory file out of the list
    for directory in menu.Directories:
        for dir in menu.DirectoryDirs:
            if os.path.isfile(os.path.join(dir, directory)):
                menuentry = MenuEntry(directory, dir)
                if not menu.Directory:
                    menu.Directory = menuentry
                elif menuentry.getType() == "System":
                    if menu.Directory.getType() == "User":
                        menu.Directory.Original = menuentry
        if menu.Directory:
            break
# Menu parsing stuff
def __parseMenu(child, filename, parent):
    """Parse a <Menu> element into a new Menu, attaching it to *parent*
    or installing it as the root when there is no parent yet."""
    menu = Menu()
    __parse(child, filename, menu)
    if parent:
        parent.addSubmenu(menu)
    else:
        tmp["Root"] = menu
# helper function
def __check(value, filename, type):
    """Resolve *value* relative to *filename*'s directory and return the
    absolute path when it is an existing dir/file (per *type*), else False."""
    if not os.path.isabs(value):
        value = os.path.join(os.path.dirname(filename), value)
    value = os.path.abspath(value)
    if type == "dir" and os.path.exists(value) and os.path.isdir(value):
        return value
    if type == "file" and os.path.exists(value) and os.path.isfile(value):
        return value
    return False
# App/Directory Dir Stuff
def __parseAppDir(value, filename, parent):
    """Register *value* as an application directory when it exists."""
    path = __check(value, filename, "dir")
    if path:
        parent.AppDirs.append(path)
def __parseDefaultAppDir(filename, parent):
    """Add the standard <data dir>/applications dirs, lowest priority first."""
    for base in reversed(xdg_data_dirs):
        __parseAppDir(os.path.join(base, "applications"), filename, parent)
def __parseDirectoryDir(value, filename, parent):
    """Register *value* as a .directory-file directory when it exists."""
    path = __check(value, filename, "dir")
    if path:
        parent.DirectoryDirs.append(path)
def __parseDefaultDirectoryDir(filename, parent):
    """Add the standard <data dir>/desktop-directories dirs, lowest first."""
    for base in reversed(xdg_data_dirs):
        __parseDirectoryDir(os.path.join(base, "desktop-directories"), filename, parent)
# Merge Stuff
def __parseMergeFile(value, child, filename, parent):
    """Handle a <MergeFile> element.

    For type="parent": locate this menu file's path relative to one of the
    XDG config dirs, then merge the same relative file from the first
    *other* config dir that has it.  Otherwise merge the file named by
    *value* (resolved relative to *filename*).
    """
    if child.getAttribute("type") == "parent":
        for dir in xdg_config_dirs:
            # rel_file differs from filename only when filename lives
            # under this config dir.
            rel_file = filename.replace(dir, "").strip("/")
            if rel_file != filename:
                for p in xdg_config_dirs:
                    if dir == p:
                        continue
                    if os.path.isfile(os.path.join(p,rel_file)):
                        __mergeFile(os.path.join(p,rel_file),child,parent)
                        # NOTE(review): break leaves only the inner loop;
                        # the outer loop keeps scanning — confirm intended.
                        break
    else:
        value = __check(value, filename, "file")
        if value:
            __mergeFile(value, child, parent)
def __parseMergeDir(value, child, filename, parent):
    """Merge every .menu file found directly inside directory *value*."""
    directory = __check(value, filename, "dir")
    if not directory:
        return
    for name in os.listdir(directory):
        try:
            if os.path.splitext(name)[1] == ".menu":
                __mergeFile(os.path.join(directory, name), child, parent)
        except UnicodeDecodeError:
            # Skip names that cannot be decoded.
            continue
def __parseDefaultMergeDirs(child, filename, parent):
    """Merge the standard <basename>-merged dirs from the XDG config dirs,
    lowest priority first."""
    stem = os.path.splitext(os.path.basename(filename))[0]
    for base in reversed(xdg_config_dirs):
        __parseMergeDir(os.path.join(base, "menus", stem + "-merged"), child, filename, parent)
def __mergeFile(filename, child, parent):
    """Merge another .menu file's root element into *parent*.

    Best-effort: files already merged, missing files and malformed XML are
    silently ignored unless the module-level ``debug`` flag (from
    xdg.Exceptions) is set, in which case they raise.
    """
    # check for infinite loops
    if filename in tmp["mergeFiles"]:
        if debug:
            raise ParsingError('Infinite MergeFile loop detected', filename)
        else:
            return
    tmp["mergeFiles"].append(filename)
    # load file
    try:
        doc = xml.dom.minidom.parse(filename)
    except IOError:
        if debug:
            raise ParsingError('File not found', filename)
        else:
            return
    except xml.parsers.expat.ExpatError:
        if debug:
            raise ParsingError('Not a valid .menu file', filename)
        else:
            return
    # append file
    # NOTE(review): this loop rebinds the *child* parameter; the original
    # argument is unused from here on. Only the first element child (the
    # document root) is parsed.
    for child in doc.childNodes:
        if child.nodeType == ELEMENT_NODE:
            __parse(child,filename,parent)
            break
# Legacy Dir Stuff
def __parseLegacyDir(dir, prefix, filename, parent):
    """Merge a pre-spec legacy menu directory tree into *parent*."""
    legacy_menu = __mergeLegacyDir(dir, prefix, filename, parent)
    if legacy_menu:
        parent += legacy_menu
def __mergeLegacyDir(dir, prefix, filename, parent):
    """Build a Menu from a legacy directory tree.

    Returns the new Menu, or None when the dir does not exist or was
    already processed.  Entries without categories get a per-file Include
    rule; entries outside the parent's app dirs get a "Legacy" category.
    """
    dir = __check(dir,filename,"dir")
    if dir and dir not in tmp["DirectoryDirs"]:
        tmp["DirectoryDirs"].append(dir)
        m = Menu()
        m.AppDirs.append(dir)
        m.DirectoryDirs.append(dir)
        m.Name = os.path.basename(dir)
        m.NotInXml = True
        for item in os.listdir(dir):
            try:
                if item == ".directory":
                    m.Directories.append(item)
                elif os.path.isdir(os.path.join(dir,item)):
                    # NOTE(review): the recursive call returns None for
                    # duplicate/missing dirs, and addSubmenu(None) would
                    # fail — confirm this cannot happen in practice.
                    m.addSubmenu(__mergeLegacyDir(os.path.join(dir,item), prefix, filename, parent))
            except UnicodeDecodeError:
                continue
        tmp["cache"].addMenuEntries([dir],prefix, True)
        menuentries = tmp["cache"].getMenuEntries([dir], False)
        for menuentry in menuentries:
            categories = menuentry.Categories
            if len(categories) == 0:
                r = Rule("Include")
                r.parseFilename(menuentry.DesktopFileID)
                r.compile()
                m.Rules.append(r)
            if not dir in parent.AppDirs:
                categories.append("Legacy")
            menuentry.Categories = categories
        return m
def __parseKDELegacyDirs(filename, parent):
    """Ask kde-config for KDE's legacy application dirs and merge each one.

    Does nothing when kde-config produces no output.
    """
    # os.popen3 returns (stdin, stdout, stderr); close all three when done
    # to avoid leaking file descriptors (the original left them open).
    stdin, stdout, stderr = os.popen3("kde-config --path apps")
    try:
        output = stdout.readlines()
    finally:
        stdin.close()
        stdout.close()
        stderr.close()
    try:
        for dir in output[0].split(":"):
            __parseLegacyDir(dir,"kde", filename, parent)
    except IndexError:
        pass
# remove duplicate entries from a list
def __removeDuplicates(list):
    """Return *list* with duplicates removed, keeping the LAST occurrence.

    Fixes from the original: the builtin ``set`` is no longer shadowed by a
    local, and the argument is no longer reversed in place (every caller
    reassigns the return value, so the side effect was unwanted).
    """
    seen = {}
    result = [seen.setdefault(item, item)
              for item in reversed(list) if item not in seen]
    result.reverse()
    return result
# Finally generate the menu
def __genmenuNotOnlyAllocated(menu):
    """First allocation pass: fill menus whose OnlyUnallocated flag is off,
    marking every matched entry as allocated."""
    for child in menu.Submenus:
        __genmenuNotOnlyAllocated(child)
    if menu.OnlyUnallocated != False:
        return
    tmp["cache"].addMenuEntries(menu.AppDirs)
    matched = []
    for rule in menu.Rules:
        matched = rule.do(tmp["cache"].getMenuEntries(menu.AppDirs), rule.Type, 1)
    for entry in matched:
        if entry.Add == True:
            entry.Parents.append(menu)
            entry.Add = False
            entry.Allocated = True
            menu.MenuEntries.append(entry)
def __genmenuOnlyAllocated(menu):
    """Second pass: fill OnlyUnallocated menus from still-unallocated entries.

    Unlike the first pass, Add/Allocated are deliberately left untouched
    on the matched entries.
    """
    for child in menu.Submenus:
        __genmenuOnlyAllocated(child)
    if menu.OnlyUnallocated != True:
        return
    tmp["cache"].addMenuEntries(menu.AppDirs)
    matched = []
    for rule in menu.Rules:
        matched = rule.do(tmp["cache"].getMenuEntries(menu.AppDirs), rule.Type, 2)
    for entry in matched:
        if entry.Add == True:
            entry.Parents.append(menu)
            menu.MenuEntries.append(entry)
# And sorting ...
def sort(menu):
    """Recursively build menu.Entries in layout order and compute visibility.

    Applies the menu's Layout (explicit filenames/menunames, separators and
    Merge directives), assigns each entry's Show status while maintaining
    menu.Visible, and finally prunes empty NotInXml placeholder menus.
    """
    menu.Entries = []
    menu.Visible = 0
    for submenu in menu.Submenus:
        sort(submenu)
    # Names explicitly placed by the layout; excluded from the Merge step.
    tmp_s = []
    tmp_e = []
    for order in menu.Layout.order:
        if order[0] == "Filename":
            tmp_e.append(order[1])
        elif order[0] == "Menuname":
            tmp_s.append(order[1])
    for order in menu.Layout.order:
        if order[0] == "Separator":
            separator = Separator(menu)
            # Hide a separator that directly follows another separator.
            if len(menu.Entries) > 0 and isinstance(menu.Entries[-1], Separator):
                separator.Show = False
            menu.Entries.append(separator)
        elif order[0] == "Filename":
            menuentry = menu.getMenuEntry(order[1])
            if menuentry:
                menu.Entries.append(menuentry)
        elif order[0] == "Menuname":
            submenu = menu.getMenu(order[1])
            if submenu:
                __parse_inline(submenu, menu)
        elif order[0] == "Merge":
            # NOTE(review): for type "all" only the files branch runs —
            # submenus are never merged; confirm against the menu spec.
            if order[1] == "files" or order[1] == "all":
                menu.MenuEntries.sort()
                for menuentry in menu.MenuEntries:
                    if menuentry not in tmp_e:
                        menu.Entries.append(menuentry)
            elif order[1] == "menus" or order[1] == "all":
                menu.Submenus.sort()
                for submenu in menu.Submenus:
                    if submenu.Name not in tmp_s:
                        __parse_inline(submenu, menu)
    # getHidden / NoDisplay / OnlyShowIn / NotOnlyShowIn / Deleted / NoExec
    for entry in menu.Entries:
        entry.Show = True
        menu.Visible += 1
        if isinstance(entry, Menu):
            if entry.Deleted == True:
                entry.Show = "Deleted"
                menu.Visible -= 1
            elif isinstance(entry.Directory, MenuEntry):
                if entry.Directory.DesktopEntry.getNoDisplay() == True:
                    entry.Show = "NoDisplay"
                    menu.Visible -= 1
                elif entry.Directory.DesktopEntry.getHidden() == True:
                    entry.Show = "Hidden"
                    menu.Visible -= 1
        elif isinstance(entry, MenuEntry):
            if entry.DesktopEntry.getNoDisplay() == True:
                entry.Show = "NoDisplay"
                menu.Visible -= 1
            elif entry.DesktopEntry.getHidden() == True:
                entry.Show = "Hidden"
                menu.Visible -= 1
            elif entry.DesktopEntry.getTryExec() and not __try_exec(entry.DesktopEntry.getTryExec()):
                entry.Show = "NoExec"
                menu.Visible -= 1
            elif xdg.Config.windowmanager:
                if ( entry.DesktopEntry.getOnlyShowIn() != [] and xdg.Config.windowmanager not in entry.DesktopEntry.getOnlyShowIn() ) \
                or xdg.Config.windowmanager in entry.DesktopEntry.getNotShowIn():
                    entry.Show = "NotShowIn"
                    menu.Visible -= 1
        elif isinstance(entry, Separator):
            menu.Visible -= 1
    # remove separators at the beginning and at the end
    if len(menu.Entries) > 0:
        if isinstance(menu.Entries[0], Separator):
            menu.Entries[0].Show = False
    if len(menu.Entries) > 1:
        if isinstance(menu.Entries[-1], Separator):
            menu.Entries[-1].Show = False
    # show_empty tag
    # Iterate over a copy: the original removed elements from the list it
    # was iterating, which silently skips the entry after each removal.
    for entry in menu.Entries[:]:
        if isinstance(entry, Menu) and entry.Layout.show_empty == "false" and entry.Visible == 0:
            entry.Show = "Empty"
            menu.Visible -= 1
            if entry.NotInXml == True:
                menu.Entries.remove(entry)
def __try_exec(executable):
    """Return True when *executable* is an executable file, searching the
    directories of $PATH when the name is not an existing file itself."""
    if os.path.isfile(executable):
        return os.access(executable, os.X_OK)
    for directory in os.environ['PATH'].split(os.pathsep):
        candidate = os.path.join(directory, executable)
        if os.path.isfile(candidate) and os.access(candidate, os.X_OK):
            return True
    return False
# inline tags
def __parse_inline(submenu, menu):
    """Append *submenu* to menu.Entries, inlining its entries when the
    submenu's Layout asks for it."""
    if submenu.Layout.inline != "true":
        menu.Entries.append(submenu)
        return
    entries = submenu.Entries
    if len(entries) == 1 and submenu.Layout.inline_alias == "true":
        # A single entry replaces the submenu, taking over its names.
        only = entries[0]
        only.DesktopEntry.set("Name", submenu.getName(), locale=True)
        only.DesktopEntry.set("GenericName", submenu.getGenericName(), locale=True)
        only.DesktopEntry.set("Comment", submenu.getComment(), locale=True)
        menu.Entries.append(only)
    elif len(entries) <= submenu.Layout.inline_limit or submenu.Layout.inline_limit == 0:
        # Few enough entries (or no limit): splice them in, optionally
        # preceded by an inline header.
        if submenu.Layout.inline_header == "true":
            menu.Entries.append(Header(submenu.getName(), submenu.getGenericName(), submenu.getComment()))
        for entry in entries:
            menu.Entries.append(entry)
    else:
        menu.Entries.append(submenu)
class MenuEntryCache:
    """Cache of MenuEntry objects, keyed by the directory they came from.

    The special key 'legacy' additionally collects every entry loaded from
    a legacy directory.
    """
    def __init__(self):
        self.cacheEntries = {}
        self.cacheEntries['legacy'] = []
        # Memoizes getMenuEntries() results per concatenated-dirs key.
        self.cache = {}
    def addMenuEntries(self, dirs, prefix="", legacy=False):
        """Scan *dirs* for .desktop files, loading each directory only once."""
        for dir in dirs:
            # dict.has_key() was removed in Python 3; `in` works on 2 and 3.
            if dir not in self.cacheEntries:
                self.cacheEntries[dir] = []
                self.__addFiles(dir, "", prefix, legacy)
    def __addFiles(self, dir, subdir, prefix, legacy):
        # Recursively collect .desktop entries; legacy dirs do not recurse.
        for item in os.listdir(os.path.join(dir, subdir)):
            if os.path.splitext(item)[1] == ".desktop":
                try:
                    menuentry = MenuEntry(os.path.join(subdir, item), dir, prefix)
                except ParsingError:
                    continue
                self.cacheEntries[dir].append(menuentry)
                if legacy == True:
                    self.cacheEntries['legacy'].append(menuentry)
            elif os.path.isdir(os.path.join(dir, subdir, item)) and legacy == False:
                self.__addFiles(dir, os.path.join(subdir, item), prefix, legacy)
    def getMenuEntries(self, dirs, legacy=True):
        """Return de-duplicated entries for *dirs* (memoized per dir set).

        When a System entry duplicates a User entry already collected, it
        becomes that entry's .Original instead of a second list item.
        """
        entries = []
        ids = []
        # handle legacy items
        appdirs = dirs[:]
        if legacy == True:
            appdirs.append("legacy")
        # cache the results again
        key = "".join(appdirs)
        try:
            return self.cache[key]
        except KeyError:
            pass
        for dir in appdirs:
            for menuentry in self.cacheEntries[dir]:
                try:
                    if menuentry.DesktopFileID not in ids:
                        ids.append(menuentry.DesktopFileID)
                        entries.append(menuentry)
                    elif menuentry.getType() == "System":
                        # FIXME: This is only 99% correct, but still...
                        i = entries.index(menuentry)
                        e = entries[i]
                        if e.getType() == "User":
                            e.Original = menuentry
                except UnicodeDecodeError:
                    continue
        self.cache[key] = entries
        return entries
|
unknown
|
codeparrot/codeparrot-clean
| ||
from __future__ import annotations
from time import time
from typing import TYPE_CHECKING
from urllib.parse import urldefrag
from twisted.web.client import URI
from scrapy.core.downloader.contextfactory import load_context_factory_from_settings
from scrapy.core.downloader.handlers.base import BaseDownloadHandler
from scrapy.core.http2.agent import H2Agent, H2ConnectionPool, ScrapyProxyH2Agent
from scrapy.exceptions import DownloadTimeoutError
from scrapy.utils._download_handlers import wrap_twisted_exceptions
from scrapy.utils.defer import maybe_deferred_to_future
from scrapy.utils.httpobj import urlparse_cached
from scrapy.utils.python import to_bytes
if TYPE_CHECKING:
from twisted.internet.base import DelayedCall
from twisted.internet.defer import Deferred
from twisted.web.iweb import IPolicyForHTTPS
from scrapy.crawler import Crawler
from scrapy.http import Request, Response
from scrapy.spiders import Spider
class H2DownloadHandler(BaseDownloadHandler):
    """HTTP/2 download handler backed by a crawler-wide connection pool."""

    lazy = True

    def __init__(self, crawler: Crawler):
        super().__init__(crawler)
        self._crawler = crawler
        from twisted.internet import reactor

        self._pool = H2ConnectionPool(reactor, crawler.settings)
        self._context_factory = load_context_factory_from_settings(
            crawler.settings, crawler
        )

    async def download_request(self, request: Request) -> Response:
        """Download *request* over HTTP/2 and return its Response."""
        h2_agent = ScrapyH2Agent(
            context_factory=self._context_factory,
            pool=self._pool,
            crawler=self._crawler,
        )
        assert self._crawler.spider
        with wrap_twisted_exceptions():
            deferred = h2_agent.download_request(request, self._crawler.spider)
            return await maybe_deferred_to_future(deferred)

    async def close(self) -> None:
        """Drop every pooled HTTP/2 connection."""
        self._pool.close_connections()
class ScrapyH2Agent:
    """Factory/dispatcher around Twisted HTTP/2 agents.

    Picks a plain or proxy agent per request and wires download-latency
    measurement plus a cancellation-based download timeout.
    """

    _Agent = H2Agent
    _ProxyAgent = ScrapyProxyH2Agent

    def __init__(
        self,
        context_factory: IPolicyForHTTPS,
        pool: H2ConnectionPool,
        connect_timeout: int = 10,
        bind_address: bytes | None = None,
        crawler: Crawler | None = None,
    ) -> None:
        self._context_factory = context_factory
        self._connect_timeout = connect_timeout
        self._bind_address = bind_address
        self._pool = pool
        self._crawler = crawler

    def _get_agent(self, request: Request, timeout: float | None) -> H2Agent:
        """Return the proxy agent when request.meta['proxy'] is set, else
        the plain agent; both share the connection pool."""
        from twisted.internet import reactor

        bind_address = request.meta.get("bindaddress") or self._bind_address
        proxy = request.meta.get("proxy")
        if proxy:
            # HTTPS over a proxy needs a CONNECT tunnel, which is not
            # implemented for HTTP/2.
            if urlparse_cached(request).scheme == "https":
                # ToDo
                raise NotImplementedError(
                    "Tunneling via CONNECT method using HTTP/2.0 is not yet supported"
                )
            return self._ProxyAgent(
                reactor=reactor,
                context_factory=self._context_factory,
                proxy_uri=URI.fromBytes(to_bytes(proxy, encoding="ascii")),
                connect_timeout=timeout,
                bind_address=bind_address,
                pool=self._pool,
            )
        return self._Agent(
            reactor=reactor,
            context_factory=self._context_factory,
            connect_timeout=timeout,
            bind_address=bind_address,
            pool=self._pool,
        )

    def download_request(self, request: Request, spider: Spider) -> Deferred[Response]:
        """Start the download; the returned Deferred fires with the Response.

        Callback order matters: the latency callback is attached before the
        timeout handler so latency is recorded on success, and the timer
        is created only after that so _cb_timeout can cancel it.
        """
        from twisted.internet import reactor

        timeout = request.meta.get("download_timeout") or self._connect_timeout
        agent = self._get_agent(request, timeout)
        start_time = time()

        d = agent.request(request, spider)
        d.addCallback(self._cb_latency, request, start_time)

        # Cancel the Deferred when the timeout elapses.
        timeout_cl = reactor.callLater(timeout, d.cancel)
        d.addBoth(self._cb_timeout, request, timeout, timeout_cl)
        return d

    @staticmethod
    def _cb_latency(
        response: Response, request: Request, start_time: float
    ) -> Response:
        # Record wall-clock download latency on the request meta.
        request.meta["download_latency"] = time() - start_time
        return response

    @staticmethod
    def _cb_timeout(
        response: Response, request: Request, timeout: float, timeout_cl: DelayedCall
    ) -> Response:
        # Timer still pending means the download finished in time: cancel
        # the timer and pass the result through.  Otherwise the timer
        # already fired (and cancelled the request): raise a timeout.
        if timeout_cl.active():
            timeout_cl.cancel()
            return response

        url = urldefrag(request.url)[0]
        raise DownloadTimeoutError(f"Getting {url} took longer than {timeout} seconds.")
|
python
|
github
|
https://github.com/scrapy/scrapy
|
scrapy/core/downloader/handlers/http2.py
|
/*
* Copyright 2010-2024 JetBrains s.r.o. and Kotlin Programming Language contributors.
* Use of this source code is governed by the Apache 2.0 license that can be found in the license/LICENSE.txt file.
*/
package org.jetbrains.kotlin.analysis.api.standalone.fir.test.cases.generated.cases.types;
import com.intellij.testFramework.TestDataPath;
import org.jetbrains.kotlin.test.util.KtTestUtil;
import org.jetbrains.annotations.NotNull;
import org.jetbrains.kotlin.analysis.api.standalone.fir.test.configurators.AnalysisApiFirStandaloneModeTestConfiguratorFactory;
import org.jetbrains.kotlin.analysis.test.framework.test.configurators.AnalysisApiTestConfiguratorFactoryData;
import org.jetbrains.kotlin.analysis.test.framework.test.configurators.AnalysisApiTestConfigurator;
import org.jetbrains.kotlin.analysis.test.framework.test.configurators.TestModuleKind;
import org.jetbrains.kotlin.analysis.test.framework.test.configurators.FrontendKind;
import org.jetbrains.kotlin.analysis.test.framework.test.configurators.AnalysisSessionMode;
import org.jetbrains.kotlin.analysis.test.framework.test.configurators.AnalysisApiMode;
import org.jetbrains.kotlin.analysis.api.impl.base.test.cases.types.AbstractTypePointerConsistencyTest;
import org.jetbrains.kotlin.test.TestMetadata;
import org.junit.jupiter.api.Test;
import java.io.File;
import java.util.regex.Pattern;
/** This class is generated by {@link org.jetbrains.kotlin.generators.tests.analysis.api.GenerateAnalysisApiTestsKt}. DO NOT MODIFY MANUALLY */
@SuppressWarnings("all")
@TestMetadata("analysis/analysis-api/testData/types/typePointers/consistency")
@TestDataPath("$PROJECT_ROOT")
public class FirStandaloneNormalAnalysisSourceModuleTypePointerConsistencyTestGenerated extends AbstractTypePointerConsistencyTest {
    // Configures the Analysis API for FIR frontend, source-module, normal-session,
    // standalone-mode runs; every generated test below executes under this setup.
    @NotNull
    @Override
    public AnalysisApiTestConfigurator getConfigurator() {
        return AnalysisApiFirStandaloneModeTestConfiguratorFactory.INSTANCE.createConfigurator(
            new AnalysisApiTestConfiguratorFactoryData(
                FrontendKind.Fir,
                TestModuleKind.Source,
                AnalysisSessionMode.Normal,
                AnalysisApiMode.Standalone
            )
        );
    }

    @Test
    @TestMetadata("aliasedType.kt")
    public void testAliasedType() {
        runTest("analysis/analysis-api/testData/types/typePointers/consistency/aliasedType.kt");
    }

    @Test
    @TestMetadata("aliasedTypeToClass.kt")
    public void testAliasedTypeToClass() {
        runTest("analysis/analysis-api/testData/types/typePointers/consistency/aliasedTypeToClass.kt");
    }

    @Test
    @TestMetadata("aliasedTypeUnrelatedModule.kt")
    public void testAliasedTypeUnrelatedModule() {
        runTest("analysis/analysis-api/testData/types/typePointers/consistency/aliasedTypeUnrelatedModule.kt");
    }

    // Guard test: fails if a .kt file exists in the testData directory without a
    // corresponding generated test method (i.e. the generator needs re-running).
    @Test
    public void testAllFilesPresentInConsistency() {
        KtTestUtil.assertAllTestsPresentByMetadataWithExcluded(this.getClass(), new File("analysis/analysis-api/testData/types/typePointers/consistency"), Pattern.compile("^(.+)\\.kt$"), null, true);
    }

    @Test
    @TestMetadata("annotatedType.kt")
    public void testAnnotatedType() {
        runTest("analysis/analysis-api/testData/types/typePointers/consistency/annotatedType.kt");
    }

    @Test
    @TestMetadata("badArgumentCount.kt")
    public void testBadArgumentCount() {
        runTest("analysis/analysis-api/testData/types/typePointers/consistency/badArgumentCount.kt");
    }

    @Test
    @TestMetadata("badArgumentCount2.kt")
    public void testBadArgumentCount2() {
        runTest("analysis/analysis-api/testData/types/typePointers/consistency/badArgumentCount2.kt");
    }

    @Test
    @TestMetadata("badArgumentCount3.kt")
    public void testBadArgumentCount3() {
        runTest("analysis/analysis-api/testData/types/typePointers/consistency/badArgumentCount3.kt");
    }

    @Test
    @TestMetadata("classType.kt")
    public void testClassType() {
        runTest("analysis/analysis-api/testData/types/typePointers/consistency/classType.kt");
    }

    @Test
    @TestMetadata("classTypeToTypeAlias.kt")
    public void testClassTypeToTypeAlias() {
        runTest("analysis/analysis-api/testData/types/typePointers/consistency/classTypeToTypeAlias.kt");
    }

    @Test
    @TestMetadata("classTypeUnrelatedModule.kt")
    public void testClassTypeUnrelatedModule() {
        runTest("analysis/analysis-api/testData/types/typePointers/consistency/classTypeUnrelatedModule.kt");
    }

    @Test
    @TestMetadata("classTypeWithNestedDefinitelyNotNullType.kt")
    public void testClassTypeWithNestedDefinitelyNotNullType() {
        runTest("analysis/analysis-api/testData/types/typePointers/consistency/classTypeWithNestedDefinitelyNotNullType.kt");
    }

    @Test
    @TestMetadata("classTypeWithTypeArgumentToAlias.kt")
    public void testClassTypeWithTypeArgumentToAlias() {
        runTest("analysis/analysis-api/testData/types/typePointers/consistency/classTypeWithTypeArgumentToAlias.kt");
    }

    @Test
    @TestMetadata("classTypeWithTypeArgumentUnrelatedModule.kt")
    public void testClassTypeWithTypeArgumentUnrelatedModule() {
        runTest("analysis/analysis-api/testData/types/typePointers/consistency/classTypeWithTypeArgumentUnrelatedModule.kt");
    }

    @Test
    @TestMetadata("definitelyNotNullType.kt")
    public void testDefinitelyNotNullType() {
        runTest("analysis/analysis-api/testData/types/typePointers/consistency/definitelyNotNullType.kt");
    }

    @Test
    @TestMetadata("dynamicType.kt")
    public void testDynamicType() {
        runTest("analysis/analysis-api/testData/types/typePointers/consistency/dynamicType.kt");
    }

    @Test
    @TestMetadata("errorTypeAsArgument.kt")
    public void testErrorTypeAsArgument() {
        runTest("analysis/analysis-api/testData/types/typePointers/consistency/errorTypeAsArgument.kt");
    }

    @Test
    @TestMetadata("errorTypeAsArgumentToClass.kt")
    public void testErrorTypeAsArgumentToClass() {
        runTest("analysis/analysis-api/testData/types/typePointers/consistency/errorTypeAsArgumentToClass.kt");
    }

    @Test
    @TestMetadata("errorTypeAsArgumentUntelatedModule.kt")
    public void testErrorTypeAsArgumentUntelatedModule() {
        runTest("analysis/analysis-api/testData/types/typePointers/consistency/errorTypeAsArgumentUntelatedModule.kt");
    }

    @Test
    @TestMetadata("flexibleType.kt")
    public void testFlexibleType() {
        runTest("analysis/analysis-api/testData/types/typePointers/consistency/flexibleType.kt");
    }

    @Test
    @TestMetadata("flexibleType2.kt")
    public void testFlexibleType2() {
        runTest("analysis/analysis-api/testData/types/typePointers/consistency/flexibleType2.kt");
    }

    @Test
    @TestMetadata("flexibleTypeUnrelatedModule.kt")
    public void testFlexibleTypeUnrelatedModule() {
        runTest("analysis/analysis-api/testData/types/typePointers/consistency/flexibleTypeUnrelatedModule.kt");
    }

    @Test
    @TestMetadata("functionType.kt")
    public void testFunctionType() {
        runTest("analysis/analysis-api/testData/types/typePointers/consistency/functionType.kt");
    }

    @Test
    @TestMetadata("functionTypeSuspend.kt")
    public void testFunctionTypeSuspend() {
        runTest("analysis/analysis-api/testData/types/typePointers/consistency/functionTypeSuspend.kt");
    }

    @Test
    @TestMetadata("functionTypeWithContextParameter.kt")
    public void testFunctionTypeWithContextParameter() {
        runTest("analysis/analysis-api/testData/types/typePointers/consistency/functionTypeWithContextParameter.kt");
    }

    @Test
    @TestMetadata("functionTypeWithContextParametersAndReceiver.kt")
    public void testFunctionTypeWithContextParametersAndReceiver() {
        runTest("analysis/analysis-api/testData/types/typePointers/consistency/functionTypeWithContextParametersAndReceiver.kt");
    }

    @Test
    @TestMetadata("functionTypeWithReceiver.kt")
    public void testFunctionTypeWithReceiver() {
        runTest("analysis/analysis-api/testData/types/typePointers/consistency/functionTypeWithReceiver.kt");
    }

    @Test
    @TestMetadata("implicitFlexibleDnnType.kt")
    public void testImplicitFlexibleDnnType() {
        runTest("analysis/analysis-api/testData/types/typePointers/consistency/implicitFlexibleDnnType.kt");
    }

    @Test
    @TestMetadata("intersectionType.kt")
    public void testIntersectionType() {
        runTest("analysis/analysis-api/testData/types/typePointers/consistency/intersectionType.kt");
    }

    @Test
    @TestMetadata("nestedDefinitelyNotNullType.kt")
    public void testNestedDefinitelyNotNullType() {
        runTest("analysis/analysis-api/testData/types/typePointers/consistency/nestedDefinitelyNotNullType.kt");
    }

    @Test
    @TestMetadata("nullableClassType.kt")
    public void testNullableClassType() {
        runTest("analysis/analysis-api/testData/types/typePointers/consistency/nullableClassType.kt");
    }

    @Test
    @TestMetadata("nullableType.kt")
    public void testNullableType() {
        runTest("analysis/analysis-api/testData/types/typePointers/consistency/nullableType.kt");
    }

    @Test
    @TestMetadata("qualifierNotFound.kt")
    public void testQualifierNotFound() {
        runTest("analysis/analysis-api/testData/types/typePointers/consistency/qualifierNotFound.kt");
    }

    @Test
    @TestMetadata("qualifierNotFound2.kt")
    public void testQualifierNotFound2() {
        runTest("analysis/analysis-api/testData/types/typePointers/consistency/qualifierNotFound2.kt");
    }

    @Test
    @TestMetadata("recursiveType.kt")
    public void testRecursiveType() {
        runTest("analysis/analysis-api/testData/types/typePointers/consistency/recursiveType.kt");
    }

    @Test
    @TestMetadata("recursiveTypeCaptured.kt")
    public void testRecursiveTypeCaptured() {
        runTest("analysis/analysis-api/testData/types/typePointers/consistency/recursiveTypeCaptured.kt");
    }

    @Test
    @TestMetadata("symbolNotFound.kt")
    public void testSymbolNotFound() {
        runTest("analysis/analysis-api/testData/types/typePointers/consistency/symbolNotFound.kt");
    }

    @Test
    @TestMetadata("symbolNotFoundToClass.kt")
    public void testSymbolNotFoundToClass() {
        runTest("analysis/analysis-api/testData/types/typePointers/consistency/symbolNotFoundToClass.kt");
    }

    @Test
    @TestMetadata("symbolNotFoundUnrelatedModule.kt")
    public void testSymbolNotFoundUnrelatedModule() {
        runTest("analysis/analysis-api/testData/types/typePointers/consistency/symbolNotFoundUnrelatedModule.kt");
    }

    @Test
    @TestMetadata("typeParameterType.kt")
    public void testTypeParameterType() {
        runTest("analysis/analysis-api/testData/types/typePointers/consistency/typeParameterType.kt");
    }

    @Test
    @TestMetadata("typeParameterType2.kt")
    public void testTypeParameterType2() {
        runTest("analysis/analysis-api/testData/types/typePointers/consistency/typeParameterType2.kt");
    }

    @Test
    @TestMetadata("variance.kt")
    public void testVariance() {
        runTest("analysis/analysis-api/testData/types/typePointers/consistency/variance.kt");
    }
}
|
java
|
github
|
https://github.com/JetBrains/kotlin
|
analysis/analysis-api-standalone/tests-gen/org/jetbrains/kotlin/analysis/api/standalone/fir/test/cases/generated/cases/types/FirStandaloneNormalAnalysisSourceModuleTypePointerConsistencyTestGenerated.java
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import re
import stat
from oslo_concurrency import processutils as putils
from oslo_log import log as logging
from cinder import exception
from cinder import utils
from cinder.volume.targets import iscsi
# Module-level logger, named after this module per oslo.log convention.
LOG = logging.getLogger(__name__)
class IetAdm(iscsi.ISCSITarget):
    """iSCSI target driver for IET (iSCSI Enterprise Target) via ``ietadm``.

    Targets are managed with the ``ietadm`` CLI; active sessions are read
    from ``/proc/net/iet/session``, and the persistent configuration is
    mirrored into the file named by the ``iet_conf`` option so targets
    survive a restart of the IET daemon.
    """

    VERSION = '0.1'

    def __init__(self, *args, **kwargs):
        super(IetAdm, self).__init__(*args, **kwargs)
        self.iet_conf = self.configuration.safe_get('iet_conf')
        self.iscsi_iotype = self.configuration.safe_get('iscsi_iotype')
        self.auth_type = 'IncomingUser'
        self.iet_sessions = '/proc/net/iet/session'

    def _get_target(self, iqn):
        """Return the tid (as a string) of the session matching *iqn*, else 0.

        Find existing iSCSI target session from /proc/net/iet/session

        tid:2 name:iqn.2010-10.org:volume-222
        sid:562950561399296 initiator:iqn.1994-05.com:5a6894679665
        cid:0 ip:192.168.122.1 state:active hd:none dd:none
        tid:1 name:iqn.2010-10.org:volume-111
        sid:281475567911424 initiator:iqn.1994-05.com:5a6894679665
        cid:0 ip:192.168.122.1 state:active hd:none dd:none
        """
        iscsi_target = 0
        try:
            with open(self.iet_sessions, 'r') as f:
                sessions = f.read()
        except Exception:
            LOG.exception("Failed to open iet session list for %s", iqn)
            raise
        # NOTE: the MULTILINE flag must lead the pattern; a global inline
        # flag not at the start ('^tid:(?m)') is an error since Python 3.11.
        session_list = re.split('(?m)^tid:', sessions)[1:]
        for ses in session_list:
            m = re.match(r'(\d+) name:(\S+)\s+', ses)
            if m and iqn in m.group(2):
                return m.group(1)
        return iscsi_target

    def _get_iscsi_target(self, context, vol_id):
        # Intentionally a no-op: IET derives the tid from the session list
        # (_get_target) rather than from the Cinder database.
        pass

    def _get_target_and_lun(self, context, volume):
        """Return (tid, lun) to use for *volume*.

        For ietadm dev starts at lun 0. A tid of 0 makes ietadm search for
        an empty tid when creating a new iSCSI target.
        """
        lun = 0
        iscsi_target = 0
        # Find existing iSCSI target based on iqn
        iqn = '%svolume-%s' % (self.iscsi_target_prefix, volume['id'])
        iscsi_target = self._get_target(iqn)
        return iscsi_target, lun

    def create_iscsi_target(self, name, tid, lun, path,
                            chap_auth=None, **kwargs):
        """Create an iSCSI target and attach *path* as its lun 0.

        :param name: target IQN, of the form ``<prefix>:volume-<id>``
        :param chap_auth: optional ``(username, password)`` tuple
        :returns: the tid actually assigned by ietadm
        :raises exception.ISCSITargetCreateFailed: if any ietadm step fails
        """
        config_auth = None
        vol_id = name.split(':')[1]

        # Check whether the target already exists (0 means "not found").
        tmp_tid = self._get_target(name)

        # Create a new iSCSI target. If a target already exists,
        # the command returns 234, but we ignore it.
        try:
            self._new_target(name, tid)
            tid = self._get_target(name)
            self._new_logicalunit(tid, lun, path)
            if chap_auth is not None:
                (username, password) = chap_auth
                config_auth = ' '.join((self.auth_type,) + chap_auth)
                self._new_auth(tid, self.auth_type, username, password)
        except putils.ProcessExecutionError:
            LOG.exception("Failed to create iscsi target for volume "
                          "id:%s", vol_id)
            raise exception.ISCSITargetCreateFailed(volume_id=vol_id)

        # Update config file only if new scsi target is created.
        if not tmp_tid:
            self.update_config_file(name, tid, path, config_auth)

        return tid

    def update_config_file(self, name, tid, path, config_auth):
        """Append the target definition for *name* to the IET config file.

        Creates an empty config file first if none exists.

        :raises exception.ISCSITargetCreateFailed: on any file operation error
        """
        conf_file = self.iet_conf
        vol_id = name.split(':')[1]

        # If config file does not exist, create a blank conf file and
        # add configuration for the volume on the new file.
        if not os.path.exists(conf_file):
            try:
                utils.execute("truncate", conf_file, "--size=0",
                              run_as_root=True)
            except putils.ProcessExecutionError:
                LOG.exception("Failed to create %(conf)s for volume "
                              "id:%(vol_id)s",
                              {'conf': conf_file, 'vol_id': vol_id})
                raise exception.ISCSITargetCreateFailed(volume_id=vol_id)

        try:
            volume_conf = """
Target %s
%s
Lun 0 Path=%s,Type=%s
""" % (name, config_auth, path, self._iotype(path))

            with utils.temporary_chown(conf_file):
                with open(conf_file, 'a+') as f:
                    f.write(volume_conf)
        except Exception:
            LOG.exception("Failed to update %(conf)s for volume "
                          "id:%(vol_id)s",
                          {'conf': conf_file, 'vol_id': vol_id})
            raise exception.ISCSITargetCreateFailed(volume_id=vol_id)

    def remove_iscsi_target(self, tid, lun, vol_id, vol_name, **kwargs):
        """Remove the target's lun, force-close its session, delete the
        target, and strip its stanza from the IET config file.

        :raises exception.ISCSITargetRemoveFailed: if an ietadm call or the
            config-file rewrite fails
        """
        LOG.info("Removing iscsi_target for volume: %s", vol_id)

        try:
            self._delete_logicalunit(tid, lun)
            session_info = self._find_sid_cid_for_target(tid, vol_name, vol_id)
            if session_info:
                sid, cid = session_info
                self._force_delete_target(tid, sid, cid)

            self._delete_target(tid)
        except putils.ProcessExecutionError:
            LOG.exception("Failed to remove iscsi target for volume "
                          "id:%s", vol_id)
            raise exception.ISCSITargetRemoveFailed(volume_id=vol_id)

        vol_uuid_file = vol_name
        conf_file = self.iet_conf
        if os.path.exists(conf_file):
            try:
                with utils.temporary_chown(conf_file):
                    with open(conf_file, 'r+') as iet_conf_text:
                        full_txt = iet_conf_text.readlines()
                        new_iet_conf_txt = []
                        # Skip the matching "Target" line plus the next two
                        # lines (auth and Lun) of the stanza being removed.
                        count = 0
                        for line in full_txt:
                            if count > 0:
                                count -= 1
                                continue
                            elif vol_uuid_file in line:
                                count = 2
                                continue
                            else:
                                new_iet_conf_txt.append(line)

                        iet_conf_text.seek(0)
                        iet_conf_text.truncate(0)
                        iet_conf_text.writelines(new_iet_conf_txt)
            except Exception:
                LOG.exception("Failed to update %(conf)s for volume id "
                              "%(vol_id)s after removing iscsi target",
                              {'conf': conf_file, 'vol_id': vol_id})
                raise exception.ISCSITargetRemoveFailed(volume_id=vol_id)
        else:
            LOG.warning("Failed to update %(conf)s for volume id "
                        "%(vol_id)s after removing iscsi target. "
                        "%(conf)s does not exist.",
                        {'conf': conf_file, 'vol_id': vol_id})

    def _find_sid_cid_for_target(self, tid, name, vol_id):
        """Find sid, cid for existing iscsi target.

        Returns a ``(sid, cid)`` string tuple, or None if the session list
        cannot be read or no session matches both *tid* and *name*.
        """
        try:
            with open(self.iet_sessions, 'r') as f:
                sessions = f.read()
        except Exception as e:
            LOG.info("Failed to open iet session list for "
                     "%(vol_id)s: %(e)s",
                     {'vol_id': vol_id, 'e': e})
            return None

        # NOTE: flag moved to the pattern start; see _get_target.
        session_list = re.split('(?m)^tid:', sessions)[1:]
        for ses in session_list:
            m = re.match(r'(\d+) name:(\S+)\s+sid:(\d+).+\s+cid:(\d+)', ses)
            if m and tid in m.group(1) and name in m.group(2):
                return m.group(3), m.group(4)

    def _is_block(self, path):
        """Return True if *path* is a block device."""
        mode = os.stat(path).st_mode
        return stat.S_ISBLK(mode)

    def _iotype(self, path):
        """Return the IET IO type: configured value, or auto-detected
        'blockio'/'fileio' when iscsi_iotype is 'auto'."""
        if self.iscsi_iotype == 'auto':
            return 'blockio' if self._is_block(path) else 'fileio'
        else:
            return self.iscsi_iotype

    def _new_target(self, name, tid):
        """Create new scsi target using specified parameters.

        If the target already exists, ietadm returns
        'Invalid argument' and error code '234'.
        This should be ignored for ensure export case.
        """
        utils.execute('ietadm', '--op', 'new',
                      '--tid=%s' % tid,
                      '--params', 'Name=%s' % name,
                      run_as_root=True, check_exit_code=[0, 234])

    def _delete_target(self, tid):
        """Delete the scsi target identified by *tid*."""
        utils.execute('ietadm', '--op', 'delete',
                      '--tid=%s' % tid,
                      run_as_root=True)

    def _force_delete_target(self, tid, sid, cid):
        """Force-close the session (sid, cid) on target *tid*."""
        utils.execute('ietadm', '--op', 'delete',
                      '--tid=%s' % tid,
                      '--sid=%s' % sid,
                      '--cid=%s' % cid,
                      run_as_root=True)

    def show_target(self, tid, iqn=None):
        """Show target parameters; raises if the target does not exist."""
        utils.execute('ietadm', '--op', 'show',
                      '--tid=%s' % tid,
                      run_as_root=True)

    def _new_logicalunit(self, tid, lun, path):
        """Attach a new volume to scsi target as a logical unit.

        If a logical unit exists on the specified target lun,
        ietadm returns 'File exists' and error code '239'.
        This should be ignored for ensure export case.
        """
        utils.execute('ietadm', '--op', 'new',
                      '--tid=%s' % tid,
                      '--lun=%d' % lun,
                      '--params',
                      'Path=%s,Type=%s' % (path, self._iotype(path)),
                      run_as_root=True, check_exit_code=[0, 239])

    def _delete_logicalunit(self, tid, lun):
        """Detach the logical unit *lun* from target *tid*."""
        utils.execute('ietadm', '--op', 'delete',
                      '--tid=%s' % tid,
                      '--lun=%d' % lun,
                      run_as_root=True)

    def _new_auth(self, tid, auth_type, username, password):
        """Configure CHAP credentials on target *tid*.

        The parameter was renamed from ``type`` (shadowed the builtin);
        all call sites pass it positionally.
        """
        utils.execute('ietadm', '--op', 'new',
                      '--tid=%s' % tid,
                      '--user',
                      '--params=%s=%s,Password=%s' % (auth_type,
                                                      username,
                                                      password),
                      run_as_root=True)
|
unknown
|
codeparrot/codeparrot-clean
| ||
#ifndef HEADER_CURL_ASYN_H
#define HEADER_CURL_ASYN_H
/***************************************************************************
* _ _ ____ _
* Project ___| | | | _ \| |
* / __| | | | |_) | |
* | (__| |_| | _ <| |___
* \___|\___/|_| \_\_____|
*
* Copyright (C) Daniel Stenberg, <daniel@haxx.se>, et al.
*
* This software is licensed as described in the file COPYING, which
* you should have received as part of this distribution. The terms
* are also available at https://curl.se/docs/copyright.html.
*
* You may opt to use, copy, modify, merge, publish, distribute and/or sell
* copies of the Software, and permit persons to whom the Software is
* furnished to do so, under the terms of the COPYING file.
*
* This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
* KIND, either express or implied.
*
* SPDX-License-Identifier: curl
*
***************************************************************************/
#include "curl_setup.h"
#if defined(USE_HTTPSRR) && defined(USE_ARES)
#include "httpsrr.h"
#endif
struct Curl_easy;
struct Curl_dns_entry;
#ifdef CURLRES_ASYNCH
#include "curl_addrinfo.h"
struct hostent;
struct connectdata;
struct easy_pollset;
#if defined(CURLRES_ARES) && defined(CURLRES_THREADED)
#error cannot have both CURLRES_ARES and CURLRES_THREADED defined
#endif
/*
* This header defines all functions in the internal asynch resolver interface.
* All asynch resolvers need to provide these functions.
* asyn-ares.c and asyn-thread.c are the current implementations of asynch
* resolver backends.
*/
/*
* Curl_async_global_init()
*
* Called from curl_global_init() to initialize global resolver environment.
* Returning anything else than CURLE_OK fails curl_global_init().
*/
int Curl_async_global_init(void);
/*
* Curl_async_global_cleanup()
* Called from curl_global_cleanup() to destroy global resolver environment.
*/
void Curl_async_global_cleanup(void);
/*
* Curl_async_get_impl()
* Get the resolver implementation instance (c-ares channel) or NULL
* for passing to application callback.
*/
CURLcode Curl_async_get_impl(struct Curl_easy *easy, void **impl);
/* Curl_async_pollset()
*
* This function is called from the Curl_multi_pollset() function. 'sock' is a
* pointer to an array to hold the file descriptors, with 'numsock' being the
* size of that array (in number of entries). This function is supposed to
* return bitmask indicating what file descriptors (referring to array indexes
* in the 'sock' array) to wait for, read/write.
*/
CURLcode Curl_async_pollset(struct Curl_easy *data, struct easy_pollset *ps);
/*
* Curl_async_is_resolved()
*
* Called repeatedly to check if a previous name resolve request has
* completed. It should also make sure to time-out if the operation seems to
* take too long.
*
* Returns normal CURLcode errors.
*/
CURLcode Curl_async_is_resolved(struct Curl_easy *data,
struct Curl_dns_entry **dns);
/*
* Curl_async_await()
*
* Waits for a resolve to finish. This function should be avoided since using
* this risk getting the multi interface to "hang".
*
* On return 'entry' is assigned the resolved dns (CURLE_OK or NULL otherwise.
*
* Returns CURLE_COULDNT_RESOLVE_HOST if the host was not resolved,
* CURLE_OPERATION_TIMEDOUT if a time-out occurred, or other errors.
*/
CURLcode Curl_async_await(struct Curl_easy *data,
struct Curl_dns_entry **dnsentry);
/*
* Curl_async_getaddrinfo() - when using this resolver
*
* Returns name information about the given hostname and port number. If
* successful, the 'hostent' is returned and the fourth argument will point to
* memory we need to free after use. That memory *MUST* be freed with
* Curl_freeaddrinfo(), nothing else.
*
* Each resolver backend must of course make sure to return data in the
* correct format to comply with this.
*/
CURLcode Curl_async_getaddrinfo(struct Curl_easy *data, const char *hostname,
int port, int ip_version);
#ifdef USE_ARES
/* common functions for c-ares and threaded resolver with HTTPSRR */
#include <ares.h>
CURLcode Curl_ares_pollset(struct Curl_easy *data,
ares_channel channel,
struct easy_pollset *ps);
int Curl_ares_perform(ares_channel channel, timediff_t timeout_ms);
#endif
#ifdef CURLRES_ARES
/* async resolving implementation using c-ares alone */
/* Per-transfer state for the c-ares based resolver backend. */
struct async_ares_ctx {
  ares_channel channel;
  int num_pending; /* number of outstanding c-ares requests */
  struct Curl_addrinfo *temp_ai; /* intermediary result while fetching c-ares
                                    parts */
  int ares_status; /* ARES_SUCCESS, ARES_ENOTFOUND, etc. */
  CURLcode result; /* CURLE_OK or error handling response */
#ifndef HAVE_CARES_GETADDRINFO
  struct curltime happy_eyeballs_dns_time; /* when this timer started, or 0 */
#endif
#ifdef USE_HTTPSRR
  struct Curl_https_rrinfo hinfo; /* HTTPS RR info; presumably filled by the
                                     HTTPSRR lookup — confirm with asyn-ares.c */
#endif
};
void Curl_async_ares_shutdown(struct Curl_easy *data);
void Curl_async_ares_destroy(struct Curl_easy *data);
/* Set the DNS server to use by ares, from `data` settings. */
CURLcode Curl_async_ares_set_dns_servers(struct Curl_easy *data);
/* Set the DNS interfacer to use by ares, from `data` settings. */
CURLcode Curl_async_ares_set_dns_interface(struct Curl_easy *data);
/* Set the local ipv4 address to use by ares, from `data` settings. */
CURLcode Curl_async_ares_set_dns_local_ip4(struct Curl_easy *data);
/* Set the local ipv6 address to use by ares, from `data` settings. */
CURLcode Curl_async_ares_set_dns_local_ip6(struct Curl_easy *data);
#endif /* CURLRES_ARES */
#ifdef CURLRES_THREADED
/* async resolving implementation using POSIX threads */
#include "curl_threads.h"
/* Context for threaded address resolver. Shared between the transfer and
   the resolver thread; `ref_count` (guarded by `mutx`) decides who frees it,
   since a running thread cannot be killed. */
struct async_thrdd_addr_ctx {
  curl_thread_t thread_hnd;
  char *hostname; /* hostname to resolve, Curl_async.hostname
                     duplicate */
  curl_mutex_t mutx;
#ifndef CURL_DISABLE_SOCKETPAIR
  curl_socket_t sock_pair[2]; /* eventfd/pipes/socket pair */
#endif
  struct Curl_addrinfo *res; /* resolve result, NULL until available */
#ifdef HAVE_GETADDRINFO
  struct addrinfo hints; /* hints passed to getaddrinfo() */
#endif
  struct curltime start; /* when the resolve was started */
  timediff_t interval_end; /* NOTE(review): presumably end of the current
                              poll interval — confirm in asyn-thread.c */
  unsigned int poll_interval;
  int port; /* port number to resolve for */
  int sock_error;
  int ref_count; /* owners of this struct (transfer and/or thread) */
  BIT(thrd_done); /* set when the resolver thread has finished */
  BIT(do_abort);  /* asks the thread to abandon its work */
};
/* Context for threaded resolver */
struct async_thrdd_ctx {
  /* `addr` is a pointer since this memory is shared with a started
   * thread. Since threads cannot be killed, we use reference counting
   * so that we can "release" our pointer to this memory while the
   * thread is still running. */
  struct async_thrdd_addr_ctx *addr;
#if defined(USE_HTTPSRR) && defined(USE_ARES)
  /* HTTPS RR resolution done via c-ares alongside the threaded
   * address resolve */
  struct {
    ares_channel channel;
    struct Curl_https_rrinfo hinfo;
    CURLcode result;
    BIT(done); /* set when the HTTPS RR query has completed */
  } rr;
#endif
};
void Curl_async_thrdd_shutdown(struct Curl_easy *data);
void Curl_async_thrdd_destroy(struct Curl_easy *data);
#endif /* CURLRES_THREADED */
#ifndef CURL_DISABLE_DOH
struct doh_probes;
#endif
#else /* CURLRES_ASYNCH */
/* convert these functions if an asynch resolver is not used */
#define Curl_async_get_impl(x, y) (*(y) = NULL, CURLE_OK)
#define Curl_async_is_resolved(x, y) CURLE_COULDNT_RESOLVE_HOST
#define Curl_async_await(x, y) CURLE_COULDNT_RESOLVE_HOST
#define Curl_async_global_init() CURLE_OK
#define Curl_async_global_cleanup() Curl_nop_stmt
#endif /* !CURLRES_ASYNCH */
#if defined(CURLRES_ASYNCH) || !defined(CURL_DISABLE_DOH)
#define USE_CURL_ASYNC
#endif
#ifdef USE_CURL_ASYNC
/* State of an asynchronous name resolve, one per transfer. Holds the
   backend-specific context plus the parameters the resolve started with. */
struct Curl_async {
#ifdef CURLRES_ARES
  struct async_ares_ctx ares;
#elif defined(CURLRES_THREADED)
  struct async_thrdd_ctx thrdd;
#endif
#ifndef CURL_DISABLE_DOH
  struct doh_probes *doh; /* DoH specific data for this request */
#endif
  struct Curl_dns_entry *dns; /* result of resolving on success */
  char *hostname; /* copy of the params resolv started with */
  int port;       /* port number the resolve was started with */
  int ip_version; /* requested IP version; presumably CURL_IPRESOLVE_*
                     values — confirm at the call sites */
  BIT(done);      /* set TRUE once the resolve has completed */
};
/*
* Curl_async_shutdown().
*
* This shuts down all ongoing operations.
*/
void Curl_async_shutdown(struct Curl_easy *data);
/*
* Curl_async_destroy().
*
* This frees the resources of any async resolve.
*/
void Curl_async_destroy(struct Curl_easy *data);
#else /* !USE_CURL_ASYNC */
#define Curl_async_shutdown(x) Curl_nop_stmt
#define Curl_async_destroy(x) Curl_nop_stmt
#endif /* USE_CURL_ASYNC */
/********** end of generic resolver interface functions *****************/
#endif /* HEADER_CURL_ASYN_H */
|
c
|
github
|
https://github.com/curl/curl
|
lib/asyn.h
|
{
"Id": "sha256:44cc64492fb6a6d78d3e6d087f380ae6e479aa1b2c79823b32cdacfcc2f3d715",
"RepoTags": [
"paketo-buildpacks/cnb:base",
"paketo-buildpacks/builder:base-platform-api-0.2"
],
"RepoDigests": [
"paketo-buidpacks/cnb@sha256:5b03a853e636b78c44e475bbc514e2b7b140cc41cca8ab907e9753431ae8c0b0"
],
"Parent": "",
"Comment": "",
"Created": "1980-01-01T00:00:01Z",
"Container": "",
"ContainerConfig": {
"Hostname": "",
"Domainname": "",
"User": "",
"AttachStdin": false,
"AttachStdout": false,
"AttachStderr": false,
"Tty": false,
"OpenStdin": false,
"StdinOnce": false,
"Env": null,
"Cmd": null,
"Image": "",
"Volumes": null,
"WorkingDir": "",
"Entrypoint": null,
"OnBuild": null,
"Labels": null
},
"DockerVersion": "",
"Author": "",
"Config": {
"Hostname": "",
"Domainname": "",
"User": "1000:1000",
"AttachStdin": false,
"AttachStdout": false,
"AttachStderr": false,
"Tty": false,
"OpenStdin": false,
"StdinOnce": false,
"Env": [
"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin",
"CNB_USER_ID=1000",
"CNB_GROUP_ID=1000",
"CNB_STACK_ID=io.buildpacks.stacks.bionic"
],
"Cmd": [
"/bin/bash"
],
"ArgsEscaped": true,
"Image": "sha256:2d153261a5e359c632a17377cfb5d1986c27b96c8b6e95334bf80f1029dbd4bb",
"Volumes": null,
"WorkingDir": "/layers",
"Entrypoint": null,
"OnBuild": null,
"Labels": {
"io.buildpacks.builder.metadata": "{\"description\":\"Ubuntu bionic base image with buildpacks for Java, NodeJS and Golang\",\"buildpacks\":[{\"id\":\"paketo-buildpacks/dotnet-core\",\"version\":\"0.0.9\",\"homepage\":\"https://github.com/paketo-buildpacks/dotnet-core\"},{\"id\":\"paketo-buildpacks/dotnet-core-runtime\",\"version\":\"0.0.201\",\"homepage\":\"https://github.com/paketo-buildpacks/dotnet-core-runtime\"},{\"id\":\"paketo-buildpacks/dotnet-core-sdk\",\"version\":\"0.0.196\",\"homepage\":\"https://github.com/paketo-buildpacks/dotnet-core-sdk\"},{\"id\":\"paketo-buildpacks/dotnet-execute\",\"version\":\"0.0.180\",\"homepage\":\"https://github.com/paketo-buildpacks/dotnet-execute\"},{\"id\":\"paketo-buildpacks/dotnet-publish\",\"version\":\"0.0.121\",\"homepage\":\"https://github.com/paketo-buildpacks/dotnet-publish\"},{\"id\":\"paketo-buildpacks/dotnet-core-aspnet\",\"version\":\"0.0.196\",\"homepage\":\"https://github.com/paketo-buildpacks/dotnet-core-aspnet\"},{\"id\":\"paketo-buildpacks/java-native-image\",\"version\":\"4.7.0\",\"homepage\":\"https://github.com/paketo-buildpacks/java-native-image\"},{\"id\":\"paketo-buildpacks/spring-boot\",\"version\":\"3.5.0\",\"homepage\":\"https://github.com/paketo-buildpacks/spring-boot\"},{\"id\":\"paketo-buildpacks/executable-jar\",\"version\":\"3.1.3\",\"homepage\":\"https://github.com/paketo-buildpacks/executable-jar\"},{\"id\":\"paketo-buildpacks/graalvm\",\"version\":\"4.1.0\",\"homepage\":\"https://github.com/paketo-buildpacks/graalvm\"},{\"id\":\"paketo-buildpacks/gradle\",\"version\":\"3.5.0\",\"homepage\":\"https://github.com/paketo-buildpacks/gradle\"},{\"id\":\"paketo-buildpacks/leiningen\",\"version\":\"1.2.1\",\"homepage\":\"https://github.com/paketo-buildpacks/leiningen\"},{\"id\":\"paketo-buildpacks/sbt\",\"version\":\"3.6.0\",\"homepage\":\"https://github.com/paketo-buildpacks/sbt\"},{\"id\":\"paketo-buildpacks/spring-boot-native-image\",\"version\":\"2.0.1\",\"homepage\":\"https://github.com/paket
o-buildpacks/spring-boot-native-image\"},{\"id\":\"paketo-buildpacks/environment-variables\",\"version\":\"2.1.2\",\"homepage\":\"https://github.com/paketo-buildpacks/environment-variables\"},{\"id\":\"paketo-buildpacks/image-labels\",\"version\":\"2.0.7\",\"homepage\":\"https://github.com/paketo-buildpacks/image-labels\"},{\"id\":\"paketo-buildpacks/maven\",\"version\":\"3.2.1\",\"homepage\":\"https://github.com/paketo-buildpacks/maven\"},{\"id\":\"paketo-buildpacks/java\",\"version\":\"4.10.0\",\"homepage\":\"https://github.com/paketo-buildpacks/java\"},{\"id\":\"paketo-buildpacks/ca-certificates\",\"version\":\"1.0.1\",\"homepage\":\"https://github.com/paketo-buildpacks/ca-certificates\"},{\"id\":\"paketo-buildpacks/environment-variables\",\"version\":\"2.1.2\",\"homepage\":\"https://github.com/paketo-buildpacks/environment-variables\"},{\"id\":\"paketo-buildpacks/executable-jar\",\"version\":\"3.1.3\",\"homepage\":\"https://github.com/paketo-buildpacks/executable-jar\"},{\"id\":\"paketo-buildpacks/procfile\",\"version\":\"3.0.0\",\"homepage\":\"https://github.com/paketo-buildpacks/procfile\"},{\"id\":\"paketo-buildpacks/apache-tomcat\",\"version\":\"3.2.0\",\"homepage\":\"https://github.com/paketo-buildpacks/apache-tomcat\"},{\"id\":\"paketo-buildpacks/gradle\",\"version\":\"3.5.0\",\"homepage\":\"https://github.com/paketo-buildpacks/gradle\"},{\"id\":\"paketo-buildpacks/maven\",\"version\":\"3.2.1\",\"homepage\":\"https://github.com/paketo-buildpacks/maven\"},{\"id\":\"paketo-buildpacks/sbt\",\"version\":\"3.6.0\",\"homepage\":\"https://github.com/paketo-buildpacks/sbt\"},{\"id\":\"paketo-buildpacks/bellsoft-liberica\",\"version\":\"6.2.0\",\"homepage\":\"https://github.com/paketo-buildpacks/bellsoft-liberica\"},{\"id\":\"paketo-buildpacks/google-stackdriver\",\"version\":\"2.16.0\",\"homepage\":\"https://github.com/paketo-buildpacks/google-stackdriver\"},{\"id\":\"paketo-buildpacks/image-labels\",\"version\":\"2.0.7\",\"homepage\":\"https://github.com/paketo-b
uildpacks/image-labels\"},{\"id\":\"paketo-buildpacks/dist-zip\",\"version\":\"2.2.2\",\"homepage\":\"https://github.com/paketo-buildpacks/dist-zip\"},{\"id\":\"paketo-buildpacks/spring-boot\",\"version\":\"3.5.0\",\"homepage\":\"https://github.com/paketo-buildpacks/spring-boot\"},{\"id\":\"paketo-buildpacks/jmx\",\"version\":\"2.1.4\",\"homepage\":\"https://github.com/paketo-buildpacks/jmx\"},{\"id\":\"paketo-buildpacks/leiningen\",\"version\":\"1.2.1\",\"homepage\":\"https://github.com/paketo-buildpacks/leiningen\"}],\"stack\":{\"runImage\":{\"image\":\"cloudfoundry/run:base-cnb\",\"mirrors\":null}},\"lifecycle\":{\"version\":\"0.7.2\",\"api\":{\"buildpack\":\"0.2\",\"platform\":\"0.3\"}},\"createdBy\":{\"name\":\"Pack CLI\",\"version\":\"v0.9.0 (git sha: d42c384a39f367588f2653f2a99702db910e5ad7)\"}}",
"io.buildpacks.buildpack.layers": "{\"org.cloudfoundry.archiveexpanding\":{\"v1.0.102\":{\"api\":\"0.2\",\"stacks\":[{\"id\":\"io.buildpacks.stacks.bionic\"},{\"id\":\"org.cloudfoundry.stacks.cflinuxfs3\"}],\"layerDiffID\":\"sha256:73b1a8ac1f7fca3d545766ce7fd3c56b40a63724ab78e464d71a29da0c6ac31c\"}},\"org.cloudfoundry.azureapplicationinsights\":{\"v1.1.12\":{\"api\":\"0.2\",\"stacks\":[{\"id\":\"io.buildpacks.stacks.bionic\"},{\"id\":\"org.cloudfoundry.stacks.cflinuxfs3\"}],\"layerDiffID\":\"sha256:a0a2f7c467efbb8b1ac222f09013b88b68f3c117ec6b6e9dc95564be50f271ab\"}},\"org.cloudfoundry.buildsystem\":{\"v1.2.15\":{\"api\":\"0.2\",\"stacks\":[{\"id\":\"io.buildpacks.stacks.bionic\"},{\"id\":\"org.cloudfoundry.stacks.cflinuxfs3\"}],\"layerDiffID\":\"sha256:3f50d3a0e1a969a9606b59e5295842d731e425108cb349ce6c69a5b30ea1bab9\"}},\"org.cloudfoundry.debug\":{\"v1.2.11\":{\"api\":\"0.2\",\"stacks\":[{\"id\":\"io.buildpacks.stacks.bionic\"},{\"id\":\"org.cloudfoundry.stacks.cflinuxfs3\"}],\"layerDiffID\":\"sha256:04559213a01cfac69a8d6a6facb58b8681666525c74f605207c40a61a0f4c9b7\"}},\"org.cloudfoundry.dep\":{\"0.0.101\":{\"api\":\"0.2\",\"stacks\":[{\"id\":\"org.cloudfoundry.stacks.cflinuxfs3\"},{\"id\":\"io.buildpacks.stacks.bionic\"},{\"id\":\"org.cloudfoundry.stacks.tiny\"}],\"layerDiffID\":\"sha256:6aae3a2d671d369eec34dc9146ef267d06c87461f271fbfbe9136775ecf5dfb8\"}},\"org.cloudfoundry.distzip\":{\"v1.1.12\":{\"api\":\"0.2\",\"stacks\":[{\"id\":\"io.buildpacks.stacks.bionic\"},{\"id\":\"org.cloudfoundry.stacks.cflinuxfs3\"}],\"layerDiffID\":\"sha256:a0715e661e13d7d3ded5bdc068edd01e5b3aa0e2805152f4c8a1428b4e0673df\"}},\"org.cloudfoundry.dotnet-core\":{\"v0.0.6\":{\"api\":\"0.2\",\"order\":[{\"group\":[{\"id\":\"org.cloudfoundry.node-engine\",\"version\":\"0.0.158\",\"optional\":true},{\"id\":\"org.cloudfoundry.icu\",\"version\":\"0.0.43\",\"optional\":true},{\"id\":\"org.cloudfoundry.dotnet-core-runtime\",\"version\":\"0.0.127\",\"optional\":true},{\"id\":\"org.cloudfoundry.dotn
et-core-aspnet\",\"version\":\"0.0.118\",\"optional\":true},{\"id\":\"org.cloudfoundry.dotnet-core-sdk\",\"version\":\"0.0.122\",\"optional\":true},{\"id\":\"org.cloudfoundry.dotnet-core-build\",\"version\":\"0.0.68\",\"optional\":true},{\"id\":\"org.cloudfoundry.dotnet-core-conf\",\"version\":\"0.0.115\"}]}],\"layerDiffID\":\"sha256:aa0effdf787ecfe74d60d6771006717fd1a9ce1ce0a8161624baa61b68120357\"}},\"org.cloudfoundry.dotnet-core-aspnet\":{\"0.0.118\":{\"api\":\"0.2\",\"stacks\":[{\"id\":\"org.cloudfoundry.stacks.cflinuxfs3\"},{\"id\":\"io.buildpacks.stacks.bionic\"}],\"layerDiffID\":\"sha256:a06615b5adc1a3afb7abd524e82f6900a28910927fcf0d4e9b85fd1fcbeb53ad\"}},\"org.cloudfoundry.dotnet-core-build\":{\"0.0.68\":{\"api\":\"0.2\",\"stacks\":[{\"id\":\"org.cloudfoundry.stacks.cflinuxfs3\"},{\"id\":\"io.buildpacks.stacks.bionic\"}],\"layerDiffID\":\"sha256:26d6f1e76275d17860005f7ab9b74fdd2283fcf84e0446bd88d49a6b4e9609f9\"}},\"org.cloudfoundry.dotnet-core-conf\":{\"0.0.115\":{\"api\":\"0.2\",\"stacks\":[{\"id\":\"org.cloudfoundry.stacks.cflinuxfs3\"},{\"id\":\"io.buildpacks.stacks.bionic\"}],\"layerDiffID\":\"sha256:55f7c052cf70c8ca01b8e241c0c5c8a9675599d4904c69bfb961a472e246238d\"}},\"org.cloudfoundry.dotnet-core-runtime\":{\"0.0.127\":{\"api\":\"0.2\",\"stacks\":[{\"id\":\"org.cloudfoundry.stacks.cflinuxfs3\"},{\"id\":\"io.buildpacks.stacks.bionic\"}],\"layerDiffID\":\"sha256:d9958b816a9ad179fca8c18d17c07e9814b152d461c685e1443bec6f990ab990\"}},\"org.cloudfoundry.dotnet-core-sdk\":{\"0.0.122\":{\"api\":\"0.2\",\"stacks\":[{\"id\":\"org.cloudfoundry.stacks.cflinuxfs3\"},{\"id\":\"io.buildpacks.stacks.bionic\"}],\"layerDiffID\":\"sha256:52142799a4b687fe6e5cf397c41064499ea6cc554b94904d46c1acade998e11f\"}},\"org.cloudfoundry.go\":{\"v0.0.4\":{\"api\":\"0.2\",\"order\":[{\"group\":[{\"id\":\"org.cloudfoundry.go-compiler\",\"version\":\"0.0.105\"},{\"id\":\"org.cloudfoundry.go-mod\",\"version\":\"0.0.89\"}]},{\"group\":[{\"id\":\"org.cloudfoundry.go-compiler\",\"version\":\"
0.0.105\"},{\"id\":\"org.cloudfoundry.dep\",\"version\":\"0.0.101\"}]}],\"layerDiffID\":\"sha256:352a299d6af4773322ed3643d8f98b01aad6f15d838d1852e52a0a3ca56c6efb\"}},\"org.cloudfoundry.go-compiler\":{\"0.0.105\":{\"api\":\"0.2\",\"stacks\":[{\"id\":\"org.cloudfoundry.stacks.cflinuxfs3\"},{\"id\":\"io.buildpacks.stacks.bionic\"},{\"id\":\"org.cloudfoundry.stacks.tiny\"}],\"layerDiffID\":\"sha256:cb21f14e306d94e437c5418d275bcc6efcea6bc9b3d26a400bdf54fa62242c24\"}},\"org.cloudfoundry.go-mod\":{\"0.0.89\":{\"api\":\"0.2\",\"stacks\":[{\"id\":\"org.cloudfoundry.stacks.cflinuxfs3\"},{\"id\":\"io.buildpacks.stacks.bionic\"},{\"id\":\"org.cloudfoundry.stacks.tiny\"}],\"layerDiffID\":\"sha256:c9da8171f5ca048109ffba5e940e3a7d2db567eda281f92b0eb483173df06add\"}},\"org.cloudfoundry.googlestackdriver\":{\"v1.1.11\":{\"api\":\"0.2\",\"stacks\":[{\"id\":\"io.buildpacks.stacks.bionic\"},{\"id\":\"org.cloudfoundry.stacks.cflinuxfs3\"}],\"layerDiffID\":\"sha256:ff29efc56c31eeccc79a33c6e4abd7b1ab3547d95e1cf83974af65a493576c41\"}},\"org.cloudfoundry.icu\":{\"0.0.43\":{\"api\":\"0.2\",\"stacks\":[{\"id\":\"io.buildpacks.stacks.bionic\"},{\"id\":\"org.cloudfoundry.stacks.cflinuxfs3\"}],\"layerDiffID\":\"sha256:48063dcdd043f9c88604d10fe9542569be8f8111d46806c96b08d77763ffa347\"}},\"org.cloudfoundry.jdbc\":{\"v1.1.14\":{\"api\":\"0.2\",\"stacks\":[{\"id\":\"io.buildpacks.stacks.bionic\"},{\"id\":\"org.cloudfoundry.stacks.cflinuxfs3\"}],\"layerDiffID\":\"sha256:a9c9bbbd69c212b7ab3c1a7f03011ccc4d99a6fce1bf1c785325c7bcad789e62\"}},\"org.cloudfoundry.jmx\":{\"v1.1.12\":{\"api\":\"0.2\",\"stacks\":[{\"id\":\"io.buildpacks.stacks.bionic\"},{\"id\":\"org.cloudfoundry.stacks.cflinuxfs3\"}],\"layerDiffID\":\"sha256:da62dec6eb4ed884952a1b867fd89e3bfe3c510e5c849cc0ac7050ff867a2469\"}},\"org.cloudfoundry.jvmapplication\":{\"v1.1.12\":{\"api\":\"0.2\",\"stacks\":[{\"id\":\"io.buildpacks.stacks.bionic\"},{\"id\":\"org.cloudfoundry.stacks.cflinuxfs3\"}],\"layerDiffID\":\"sha256:c10732392b97c121a78a5f20201
c2a5e834a2b8677196cdd49260a489a54fd22\"}},\"org.cloudfoundry.node-engine\":{\"0.0.158\":{\"api\":\"0.2\",\"stacks\":[{\"id\":\"org.cloudfoundry.stacks.cflinuxfs3\"},{\"id\":\"io.buildpacks.stacks.bionic\"}],\"layerDiffID\":\"sha256:70cf83155575fdb607f23ace41e31b1d5cb1c24dbbbf56f71c383b583724d339\"},\"0.0.163\":{\"api\":\"0.2\",\"stacks\":[{\"id\":\"org.cloudfoundry.stacks.cflinuxfs3\"},{\"id\":\"io.buildpacks.stacks.bionic\"}],\"layerDiffID\":\"sha256:11486cb955594f9d43909b60f94209bb6854f502a5a093207b657afbaa38a777\"}},\"org.cloudfoundry.nodejs\":{\"v2.0.8\":{\"api\":\"0.2\",\"order\":[{\"group\":[{\"id\":\"org.cloudfoundry.node-engine\",\"version\":\"0.0.163\"},{\"id\":\"org.cloudfoundry.yarn-install\",\"version\":\"0.1.10\"}]},{\"group\":[{\"id\":\"org.cloudfoundry.node-engine\",\"version\":\"0.0.163\"},{\"id\":\"org.cloudfoundry.npm\",\"version\":\"0.1.3\"}]}],\"layerDiffID\":\"sha256:76fe727e4aafc7f56f01282296ab736521c38b9d19c1ae5ebb193f9cd55fa109\"}},\"org.cloudfoundry.npm\":{\"0.1.3\":{\"api\":\"0.2\",\"stacks\":[{\"id\":\"org.cloudfoundry.stacks.cflinuxfs3\"},{\"id\":\"io.buildpacks.stacks.bionic\"}],\"layerDiffID\":\"sha256:243bbd007cb0ee99b704bfe0cf62e1301baa4095ab4c39b01293787a0e4234f1\"}},\"org.cloudfoundry.openjdk\":{\"v1.2.14\":{\"api\":\"0.2\",\"stacks\":[{\"id\":\"io.buildpacks.stacks.bionic\"},{\"id\":\"org.cloudfoundry.stacks.cflinuxfs3\"}],\"layerDiffID\":\"sha256:486b2abf434bb90cf04bab74f2f8bd2eb488ff90632b56eac4bddcbbf02e8151\"}},\"org.cloudfoundry.procfile\":{\"v1.1.12\":{\"api\":\"0.2\",\"stacks\":[{\"id\":\"io.buildpacks.stacks.bionic\"},{\"id\":\"org.cloudfoundry.stacks.cflinuxfs3\"}],\"layerDiffID\":\"sha256:b7b78159dfdaa0dd484c58652e02fa6b755abfd0adb88f106d16178144e46f33\"}},\"org.cloudfoundry.springautoreconfiguration\":{\"v1.1.11\":{\"api\":\"0.2\",\"stacks\":[{\"id\":\"io.buildpacks.stacks.bionic\"},{\"id\":\"org.cloudfoundry.stacks.cflinuxfs3\"}],\"layerDiffID\":\"sha256:c185540c10fea822c6db1b987fcfe22b55a4662648124b98475db4c9dcddb2ab\"
}},\"org.cloudfoundry.springboot\":{\"v1.2.13\":{\"api\":\"0.2\",\"stacks\":[{\"id\":\"io.buildpacks.stacks.bionic\"},{\"id\":\"org.cloudfoundry.stacks.cflinuxfs3\"}],\"layerDiffID\":\"sha256:b87e68574cc7dccbe974fa760702ef650711036bf144fd9da1f3a2d8f6ac335f\"}},\"org.cloudfoundry.tomcat\":{\"v1.3.18\":{\"api\":\"0.2\",\"stacks\":[{\"id\":\"io.buildpacks.stacks.bionic\"},{\"id\":\"org.cloudfoundry.stacks.cflinuxfs3\"}],\"layerDiffID\":\"sha256:467c0082c57b80b48487a9b8429887c0744ddc5b066b3f7678866bde89b78ab2\"}},\"org.cloudfoundry.yarn-install\":{\"0.1.10\":{\"api\":\"0.2\",\"stacks\":[{\"id\":\"org.cloudfoundry.stacks.cflinuxfs3\"},{\"id\":\"io.buildpacks.stacks.bionic\"}],\"layerDiffID\":\"sha256:6aefa0ba7ce01584b4a531b18e36470298cee3b30ecae0e0c64b532a5cebd6e7\"}}}",
"io.buildpacks.buildpack.order": "[{\"group\":[{\"id\":\"org.cloudfoundry.openjdk\"},{\"id\":\"org.cloudfoundry.buildsystem\",\"optional\":true},{\"id\":\"org.cloudfoundry.jvmapplication\"},{\"id\":\"org.cloudfoundry.tomcat\",\"optional\":true},{\"id\":\"org.cloudfoundry.springboot\",\"optional\":true},{\"id\":\"org.cloudfoundry.distzip\",\"optional\":true},{\"id\":\"org.cloudfoundry.procfile\",\"optional\":true},{\"id\":\"org.cloudfoundry.azureapplicationinsights\",\"optional\":true},{\"id\":\"org.cloudfoundry.debug\",\"optional\":true},{\"id\":\"org.cloudfoundry.googlestackdriver\",\"optional\":true},{\"id\":\"org.cloudfoundry.jdbc\",\"optional\":true},{\"id\":\"org.cloudfoundry.jmx\",\"optional\":true},{\"id\":\"org.cloudfoundry.springautoreconfiguration\",\"optional\":true}]},{\"group\":[{\"id\":\"org.cloudfoundry.nodejs\"}]},{\"group\":[{\"id\":\"org.cloudfoundry.go\"}]},{\"group\":[{\"id\":\"org.cloudfoundry.dotnet-core\"}]},{\"group\":[{\"id\":\"org.cloudfoundry.procfile\"}]}]",
"io.buildpacks.stack.id": "io.buildpacks.stacks.bionic",
"io.buildpacks.stack.mixins": "[\"build:git\",\"build:build-essential\"]"
}
},
"Architecture": "arm64",
"Os": "linux",
"Variant": "v1",
"Size": 688884758,
"VirtualSize": 688884758,
"GraphDriver": {
"Data": {
"LowerDir": "/var/lib/docker/overlay2/6a79181b2840da2706624f46ce5abd4448973b4f951925d5a276b273256063b2/diff:/var/lib/docker/overlay2/429419a203100f60ab16ec6c879fce975c8138422b9053f80accd6124c730fc2/diff:/var/lib/docker/overlay2/6e45ed6daf4f4f3b90fd1ec5fa958775000875661d3e8be3f1af218d192b058d/diff:/var/lib/docker/overlay2/22928ad308cdd55b3fe849d92b6e38c6bc303ba7c9beb8c0e79aa958e16b1864/diff:/var/lib/docker/overlay2/2ca9ec213226a1604f57c8e141d6f1168134a5cb2ccd8f91ee9be5a39036e6bf/diff:/var/lib/docker/overlay2/96ae944fe00ec20cf5b4441b112ebcc9395faaf08108c9ee38c62e1da33af1c8/diff:/var/lib/docker/overlay2/13ee52e300e476e27350c9ac6274dedf26af85c3079b42a41f9dfc92eff57a80/diff:/var/lib/docker/overlay2/223edb4cc62a2ba2b8bda866905a55c4798c6c32e31d22d60e6ed4f3169ce85e/diff:/var/lib/docker/overlay2/a41235cd7277299cb74ead47def3771885948719e24075ea3bf37580f3af7ae2/diff:/var/lib/docker/overlay2/ed0438e8e2c27b9d62ad21a0761237c350a2ffc9e52f47c019e4f627091c832e/diff:/var/lib/docker/overlay2/0c27c8229b31eafc57ab739b44962dcc07b72f3d8950888873ecb3cfd385032f/diff:/var/lib/docker/overlay2/0957cbcca052cd58bcf9a3d945b0e6876b0df79c1c534da1872c3415a019427d/diff:/var/lib/docker/overlay2/b621414d53d71349c07df8ed45e3e04b2e97bfbaf4bf0d86463f46e0f810eeb4/diff:/var/lib/docker/overlay2/ad521bc47f0bb44262358cf47c3d81a544d098494cf24a5b510620d34eb9c353/diff:/var/lib/docker/overlay2/081501d5bfbd927e69c10eb320513c7c0d5f00bea8cf9e55faa90579fd33adf4/diff:/var/lib/docker/overlay2/fb1ba66bee5568f5700c72865d020d4171a62bfdd099c3cc05b9a253d36a35a4/diff:/var/lib/docker/overlay2/06bcc6b3adeca727d554f1a745ee33242dfe1b3c6392023ac947666057303288/diff:/var/lib/docker/overlay2/1c5397d63d893202dffde29013ee826fb695bda26c718ee03ddde376be4da0a3/diff:/var/lib/docker/overlay2/76075fb7fd3c6b3fb116fb3b464e220918e56d94461c61af9a1aff288ebdba60/diff:/var/lib/docker/overlay2/43d1026bb7b618393912ecc9ddf57b604336184d5f8dc70bcf6332b5f08a3e8d/diff:/var/lib/docker/overlay2/ee27d1fba3deaca0556f7bab171cb3368f169011dd132cf335b5308728f6db
8f/diff:/var/lib/docker/overlay2/464d3ec8d86ff31dcb5063ea25521368ea8e9c7964f65e15ff5e0e1ecdbe991e/diff:/var/lib/docker/overlay2/a4a80c33c8b78f68bdc9dbd5903cc2ba1d48e78b9a97d43acb018823ece8e6cb/diff:/var/lib/docker/overlay2/6494f2f1693cff8b16d51fa95620eb0bb691a76fb39b5175d953649577791297/diff:/var/lib/docker/overlay2/9d49e146f82eb5fc4fd81613538e9c5f5f95091fbbc8c49729c6c9140ae356de/diff:/var/lib/docker/overlay2/2934818c52bcd017abe000e71342d67fbc9ccb7dbc165ce05e3250e2110229a5/diff:/var/lib/docker/overlay2/651ca06b2bf75e2122855264287fc937f30d2b49229d628909895be7128b4eb6/diff:/var/lib/docker/overlay2/c93bab59be44fa1b66689dc059d26742d00d2e787d06c3236e1f116199c9807e/diff:/var/lib/docker/overlay2/d0a8e2a0c7e0df172f7a8ebe75e2dce371bb6cc65531b06799bc677c5b5e3627/diff:/var/lib/docker/overlay2/7d14bac240e0d7936351e3fac80b7fbe2a209f4de8992091c4f75e41f9627852/diff:/var/lib/docker/overlay2/d6b192ea137a4ae95e309d263ee8c890e35da02aacd9bdcf5adbd4c28a0c0a3f/diff:/var/lib/docker/overlay2/335bfb632ab7723e25fb5dc7b67389e6ec38178ef10bfbf83337501403e61574/diff:/var/lib/docker/overlay2/0293c7e3472da58f51cbdf15fb293ff71e32c1f80f83f00fb09f8941deef5e43/diff:/var/lib/docker/overlay2/55faa8b47bcb0dd29c3836580f451a0461dd499065af9c830beff6e8329ab484/diff:/var/lib/docker/overlay2/afcb6e109c1ba7d71b8a8b7e573d4ce04f22da3fe0ee523359db5cfb95e65bb6/diff:/var/lib/docker/overlay2/b42eefd9bf6629ae9d16e7aba6ba3939d37816aba7a0999f6d639012a3119be1/diff:/var/lib/docker/overlay2/a9832c8f81ee889a622ce4d95d9f4bab2f91d30e18f69bfd7cfc385c781068d4/diff:/var/lib/docker/overlay2/224041c135f13881a98b9e833584bedab81d5650061457f522a1ebd1daa2c77a/diff:/var/lib/docker/overlay2/73dfd4e2075fccb239b3d5e9b33b32b8e410bdc3cd5a620b41346f44cc5c51f7/diff:/var/lib/docker/overlay2/b3924ed7c91730f6714d33c455db888604b59ab093033b3f59ac16ecdd777987/diff:/var/lib/docker/overlay2/e36a32cd0ab20b216a8db1a8a166b17464399e4d587d22504088a7a6ef0a68a4/diff:/var/lib/docker/overlay2/3334e94fe191333b65f571912c0fcfbbf31aeb090a2fb9b4cfdbc32a37c0fe5f/di
ff",
"MergedDir": "/var/lib/docker/overlay2/f5d133c5929da8cc8266cbbc3e36f924f4a9c835f943fb436445a26b7e1bcc56/merged",
"UpperDir": "/var/lib/docker/overlay2/f5d133c5929da8cc8266cbbc3e36f924f4a9c835f943fb436445a26b7e1bcc56/diff",
"WorkDir": "/var/lib/docker/overlay2/f5d133c5929da8cc8266cbbc3e36f924f4a9c835f943fb436445a26b7e1bcc56/work"
},
"Name": "overlay2"
},
"RootFS": {
"Type": "layers",
"Layers": [
"sha256:c8be1b8f4d60d99c281fc2db75e0f56df42a83ad2f0b091621ce19357e19d853",
"sha256:977183d4e9995d9cd5ffdfc0f29e911ec9de777bcb0f507895daa1068477f76f",
"sha256:6597da2e2e52f4d438ad49a14ca79324f130a9ea08745505aa174a8db51cb79d",
"sha256:16542a8fc3be1bfaff6ed1daa7922e7c3b47b6c3a8d98b7fca58b9517bb99b75",
"sha256:2df36adfe1af661aebb75a0db796b074bb8f861fbc8f98f6f642570692b3b133",
"sha256:f499c7d34e01d860492ef1cc34b7d7e1319b3c3c81ee7d23258b21605b5902ca",
"sha256:c4bf1d4e5d4adb566b173a0769d247f67c5dd8ff90dfdcebd8c7060f1c06caa9",
"sha256:15259abd479904cbe0d8d421e5b05b2e5745e2bf82e62cdd7fb6d3eafbe4168a",
"sha256:6aa3691a73805f608e5fce69fb6bc89aec8362f58a6b4be2682515e9cfa3cc1a",
"sha256:2d6ad1b66f5660dd860c1fe2d90d26398fcfab4dc1c87c3d5e7c0fc24f8d6fb2",
"sha256:ff29efc56c31eeccc79a33c6e4abd7b1ab3547d95e1cf83974af65a493576c41",
"sha256:b87e68574cc7dccbe974fa760702ef650711036bf144fd9da1f3a2d8f6ac335f",
"sha256:04559213a01cfac69a8d6a6facb58b8681666525c74f605207c40a61a0f4c9b7",
"sha256:467c0082c57b80b48487a9b8429887c0744ddc5b066b3f7678866bde89b78ab2",
"sha256:352a299d6af4773322ed3643d8f98b01aad6f15d838d1852e52a0a3ca56c6efb",
"sha256:486b2abf434bb90cf04bab74f2f8bd2eb488ff90632b56eac4bddcbbf02e8151",
"sha256:3f50d3a0e1a969a9606b59e5295842d731e425108cb349ce6c69a5b30ea1bab9",
"sha256:c10732392b97c121a78a5f20201c2a5e834a2b8677196cdd49260a489a54fd22",
"sha256:c185540c10fea822c6db1b987fcfe22b55a4662648124b98475db4c9dcddb2ab",
"sha256:73b1a8ac1f7fca3d545766ce7fd3c56b40a63724ab78e464d71a29da0c6ac31c",
"sha256:da62dec6eb4ed884952a1b867fd89e3bfe3c510e5c849cc0ac7050ff867a2469",
"sha256:76fe727e4aafc7f56f01282296ab736521c38b9d19c1ae5ebb193f9cd55fa109",
"sha256:a9c9bbbd69c212b7ab3c1a7f03011ccc4d99a6fce1bf1c785325c7bcad789e62",
"sha256:b7b78159dfdaa0dd484c58652e02fa6b755abfd0adb88f106d16178144e46f33",
"sha256:aa0effdf787ecfe74d60d6771006717fd1a9ce1ce0a8161624baa61b68120357",
"sha256:a0a2f7c467efbb8b1ac222f09013b88b68f3c117ec6b6e9dc95564be50f271ab",
"sha256:a0715e661e13d7d3ded5bdc068edd01e5b3aa0e2805152f4c8a1428b4e0673df",
"sha256:6aae3a2d671d369eec34dc9146ef267d06c87461f271fbfbe9136775ecf5dfb8",
"sha256:cb21f14e306d94e437c5418d275bcc6efcea6bc9b3d26a400bdf54fa62242c24",
"sha256:c9da8171f5ca048109ffba5e940e3a7d2db567eda281f92b0eb483173df06add",
"sha256:11486cb955594f9d43909b60f94209bb6854f502a5a093207b657afbaa38a777",
"sha256:243bbd007cb0ee99b704bfe0cf62e1301baa4095ab4c39b01293787a0e4234f1",
"sha256:6aefa0ba7ce01584b4a531b18e36470298cee3b30ecae0e0c64b532a5cebd6e7",
"sha256:a06615b5adc1a3afb7abd524e82f6900a28910927fcf0d4e9b85fd1fcbeb53ad",
"sha256:26d6f1e76275d17860005f7ab9b74fdd2283fcf84e0446bd88d49a6b4e9609f9",
"sha256:55f7c052cf70c8ca01b8e241c0c5c8a9675599d4904c69bfb961a472e246238d",
"sha256:d9958b816a9ad179fca8c18d17c07e9814b152d461c685e1443bec6f990ab990",
"sha256:52142799a4b687fe6e5cf397c41064499ea6cc554b94904d46c1acade998e11f",
"sha256:48063dcdd043f9c88604d10fe9542569be8f8111d46806c96b08d77763ffa347",
"sha256:70cf83155575fdb607f23ace41e31b1d5cb1c24dbbbf56f71c383b583724d339",
"sha256:6cf0f8f815d5371cf5c04e7ebf76c62467948d693b8343184d1446036980d261",
"sha256:7cbffcbb09fc5e9d00372e80990016609c09cc3113429ddc951c4a19b1a5ec72",
"sha256:5f70bf18a086007016e948b04aed3b82103a36bea41755b6cddfaf10ace3c6ef"
]
},
"Metadata": {
"LastTagTime": "0001-01-01T00:00:00Z"
}
}
|
json
|
github
|
https://github.com/spring-projects/spring-boot
|
buildpack/spring-boot-buildpack-platform/src/test/resources/org/springframework/boot/buildpack/platform/build/image-with-platform.json
|
{
"private": true,
"scripts": {
"dev": "next dev",
"build": "next build",
"start": "next start"
},
"dependencies": {
"@mantine/core": "^4.1.4",
"@mantine/hooks": "^4.1.4",
"@mantine/next": "^4.1.4",
"next": "latest",
"react": "18.0.0",
"react-dom": "18.0.0"
},
"devDependencies": {
"@types/node": "17.0.25",
"@types/react": "18.2.8",
"@types/react-dom": "18.0.1",
"prettier": "2.6.2",
"typescript": "4.6.3"
}
}
|
json
|
github
|
https://github.com/vercel/next.js
|
examples/with-mantine/package.json
|
import logging, requests
class Writer:
    """Write documents to a CouchDB server over its HTTP API.

    Register CLI options via :meth:`add_args`, then call :meth:`write`
    with the parsed args and the list of documents to store.
    """

    def __init__(self):
        self.log = logging.getLogger(__name__)
        # Silence urllib3's noisy per-connection debug logging that
        # requests would otherwise emit for every HTTP call.
        requests_logger_name = 'requests.packages.urllib3.connectionpool'
        requests_log = logging.getLogger(requests_logger_name)
        requests_log.disabled = True

    def add_args(self, parser):
        """Add CouchDB connection/tuning options to *parser* and return it."""
        parser.add_argument('--host', type=str, default='localhost')
        parser.add_argument('--port', type=int, default=5984)
        parser.add_argument('--name', type=str, required=True)
        parser.add_argument('--user', type=str)
        parser.add_argument('--password', type=str)
        parser.add_argument('--bulk_size', type=int, default=10)
        return parser

    def write(self, args, docs):
        """Create database ``args.name`` and bulk-insert *docs* into it."""
        self._configure(args)
        self._create_db(args.name)
        self._create_docs(args.name, docs)

    def _configure(self, args):
        """Derive connection settings (base URL, auth, chunking) from *args*.

        Raises ValueError for any format other than "raw".
        NOTE(review): ``--format`` is not registered in ``add_args`` —
        presumably contributed by a shared parser; confirm against caller.
        """
        if args.format != 'raw':
            raise ValueError('Only accepted format is "raw"')
        self.base_url = 'http://{0:s}:{1:d}'.format(args.host, args.port)
        self.headers = {'content-type': 'application/json'}
        if args.user:
            # requests takes a (user, password) tuple for basic auth and
            # None (its documented default) for no authentication; the
            # previous empty tuple only worked by being falsy.
            self.auth = (args.user, args.password)
        else:
            self.auth = None
        self.bulk_size = args.bulk_size

    def _create_db(self, db_name):
        """Create database *db_name*; raise ValueError unless HTTP 201."""
        url = '{base_url}/{db_name}'.format(
            base_url=self.base_url, db_name=db_name)
        resp = requests.put(url, auth=self.auth, headers=self.headers)
        if resp.status_code != 201:
            raise ValueError(resp.json())
        self.log.debug('Created database {db_name}'.format(db_name=db_name))
        return db_name

    def _create_docs(self, db_name, docs):
        """Insert *docs* into *db_name* in chunks of ``bulk_size``."""
        size = self.bulk_size
        chunks = [docs[i:i + size] for i in range(0, len(docs), size)]
        self.log.debug('Populating database {db}'.format(db=db_name))
        for chunk in chunks:
            self._bulk_insert(db_name, chunk)
        return True

    def _bulk_insert(self, db_name, docs):
        """POST one chunk of *docs* to CouchDB's ``_bulk_docs`` endpoint."""
        url = '{base_url}/{db_name}/_bulk_docs'.format(
            base_url=self.base_url, db_name=db_name)
        resp = requests.post(url, auth=self.auth, headers=self.headers,
                             json={'docs': docs})
        if resp.status_code != 201:
            raise ValueError(resp.json())
        self.log.debug('Added {0:d} docs to database {db_name}'.format(
            len(docs), db_name=db_name))
|
unknown
|
codeparrot/codeparrot-clean
| ||
# OJO: La parte de base de datos sólo está implementada a manera de prueba de concepto. (FK's fijos, etc.)
# pip install bs4
from bs4 import BeautifulSoup
# Este no recuerdo si lo bajé con pip, si da error debe ser eso.
import requests
# pip install urlextract
from urlextract import URLExtract
# pip install sqlalchemy
from sqlalchemy import create_engine, Column, Integer, String, ForeignKey
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker, relationship
# declarative_base instance; every mapped class below derives from it.
Base = declarative_base()
# ORM model for the general band attributes extracted from a band page.
class Band(Base):
    """Mapped table "Band": one row per scraped band profile."""
    __tablename__ = "Band"
    id = Column('id', Integer, primary_key=True)
    name = Column('name', String)
    country = Column('country', String)
    location = Column('location', String)
    status = Column('status', String)
    formed_in = Column('formed_in', String)
    years_active = Column('years_active', String)
    genre = Column('genre', String)
    lyrical_themes = Column('lyrical_themes', String)
    label = Column('label', String)
# ORM model for the discography entries extracted per band.
class Discography(Base):
    """Mapped table "Discography": one row per release of a band."""
    __tablename__ = "Discography"
    id = Column('id', Integer, primary_key=True)
    name = Column('name', String)
    release_type = Column('release_type', String)
    year = Column('year', String)
    # NOTE: the scraper assigns a fixed FK value (proof of concept,
    # see the file-header remark).
    band_id = Column('band_id', Integer, ForeignKey("Band.id"), nullable=False)
# ORM model for the band members extracted per band.
class Member(Base):
    """Mapped table "Member": one row per member of a band."""
    __tablename__ = "Member"
    id = Column('id', Integer, primary_key=True)
    name = Column('name', String)
    band_id = Column('band_id', Integer, ForeignKey("Band.id"), nullable=False)
# Create the engine persisting everything to a .db file.
# Missing tables are created automatically and ids autoincrement by default.
engine = create_engine('sqlite:///swedish_bands.db', echo=True)
# Both of these are required before opening any database session.
Base.metadata.create_all(bind=engine)
Session = sessionmaker(bind=engine)
def soup_it_up():
    """Interactive entry point: scrape one Metal Archives band profile.

    Fetches the band page once, then loops on a small text menu that
    dispatches to the attribute/discography/member scrapers.
    """
    # Any band profile URL on MA should work here.
    url = 'https://www.metal-archives.com/bands/Abomation/3540394749'
    response = requests.get(url)
    page = BeautifulSoup(response.text, 'html.parser')
    prompt = ("¿Qué desea hacer?\n\t1. Obtener atributos generales."
              "\n\t2. Obtener discografía."
              "\n\t3. Obtener miembros."
              "\n\t4. Salir.\n")
    while True:
        choice = input(prompt)
        if choice == '4':
            break
        if choice == '1':
            get_band_attributes(page)
            print(">>Los atributos de la banda han sido guardados en base de datos.")
        elif choice == '2':
            get_band_disco(page)
            print(">>La discografía ha sido guardada en base de datos.")
        elif choice == '3':
            get_band_members(page)
            print(">>Los miembros han sido guardados en base de datos.")
def get_band_attributes(soup):
    """Persist the general attributes of a band page into the Band table.

    Reads the band name from <h1 class="band_name"> and the general
    attributes from the page's <dd> tags, then commits one Band row.
    """
    session = Session()
    band = Band()
    # <h1 class="band_name"> carries the band name.
    band_name = soup.find("h1", {"class": "band_name"})
    # Normalize whitespace in each <dd> value: split on any whitespace
    # run and rejoin with single spaces (years_active in particular
    # arrives with noisy spacing).
    formatted_attributes = [
        " ".join(entry.getText().split())
        for entry in soup.find_all("dd")
    ]
    # Assigning attributes on the instance is all the ORM needs; no
    # explicit INSERT query is required.
    band.name = band_name.getText()
    band.country = formatted_attributes[0]
    band.location = formatted_attributes[1]
    band.status = formatted_attributes[2]
    band.formed_in = formatted_attributes[3]
    band.genre = formatted_attributes[4]
    band.lyrical_themes = formatted_attributes[5]
    band.label = formatted_attributes[6]
    band.years_active = formatted_attributes[7]
    # Stage, persist, and release the session.
    session.add(band)
    session.commit()
    session.close()
def get_band_disco(soup):
    """Scrape the band's discography page and persist one row per release.

    *soup* is the parsed band page; the discography lives behind the first
    URL found inside <div id="band_disco">, whose table rows each carry
    name / type / year in their first three <td> cells.
    """
    extractor = URLExtract()
    session = Session()
    # Find the container that links to the full discography page.
    disco_finder = soup.find("div", {"id": "band_disco"})
    # Extract every URL present in the container's markup and follow the
    # first one, which points at the discography table.
    disco_url = extractor.find_urls(str(disco_finder))
    url = disco_url[0]
    r = requests.get(url)
    disco_soup = BeautifulSoup(r.text, 'html.parser')
    # Each <tr> is one release; the first row is the table header.
    disco_entries = disco_soup.find_all("tr")
    disco_entries.pop(0)
    for item in disco_entries:
        discography = Discography()
        # NOTE(review): band_id is hard-coded; presumably only one band is
        # scraped per run — confirm against the caller.
        discography.band_id = 1
        # Fetch the cells once per row instead of once per column.
        cells = item.find_all("td")
        try:
            name_cell = cells[0]
            type_cell = cells[1]
            year_cell = cells[2]
        except IndexError:
            # A malformed/short row ends the scrape, matching the original
            # early-return behaviour — but without the bare `except:` that
            # also swallowed unrelated errors.
            session.close()
            return
        discography.name = str(name_cell.getText())
        discography.release_type = str(type_cell.getText())
        discography.year = str(year_cell.getText())
        # Stage the completed row.
        session.add(discography)
    # Persist all staged rows and release the session.
    session.commit()
    session.close()
def get_band_members(soup):
    """Scrape the band's current members and persist one row per member.

    *soup* is the parsed band page; current members are the
    <a class="bold"> links inside <div id="band_tab_members_current">.
    """
    session = Session()
    # Narrow the search to the current-members section, then collect the
    # bold anchor tags that carry the member names.
    members_section = soup.find("div", {"id": "band_tab_members_current"})
    member_links = members_section.find_all("a", {"class": "bold"})
    for link in member_links:
        member = Member()
        member.band_id = 1
        member.name = str(link.getText())
        # Stage the new row.
        session.add(member)
    # Persist all staged rows and release the session.
    session.commit()
    session.close()
# Script entry point: run the interactive scraper menu.
if __name__ == '__main__':
    soup_it_up()
|
unknown
|
codeparrot/codeparrot-clean
| ||
# -*- coding: utf-8 -*-
import itertools
import re
import types
from sqlparse import tokens as T
from sqlparse.sql import *
def _group_left_right(tlist, ttype, value, cls,
                      check_right=lambda t: True,
                      include_semicolon=False):
    """Group the operands to the left and right of every token matching
    (*ttype*, *value*) into a single *cls* instance, rewriting *tlist*
    in place (e.g. ``a AS b`` becomes one Identifier group).
    """
    # Bottom-up: recurse into nested token lists first, skipping sublists
    # that already are instances of the target class.
    [_group_left_right(sgroup, ttype, value, cls, check_right,
                       include_semicolon) for sgroup in tlist.get_sublists()
     if not isinstance(sgroup, cls)]
    idx = 0
    token = tlist.token_next_match(idx, ttype, value)
    while token:
        right = tlist.token_next(tlist.token_index(token))
        left = tlist.token_prev(tlist.token_index(token))
        if (right is None or not check_right(right)
            or left is None):
            # No usable operands around this match; move to the next one.
            token = tlist.token_next_match(tlist.token_index(token)+1,
                                           ttype, value)
        else:
            if include_semicolon:
                # Extend the right boundary up to the terminating ';'.
                right = tlist.token_next_match(tlist.token_index(right),
                                               T.Punctuation, ';')
            # Everything after `left` up to and including `right` moves
            # into the group.
            tokens = tlist.tokens_between(left, right)[1:]
            if not isinstance(left, cls):
                # Wrap the left operand in a fresh cls instance, preserving
                # its position inside tlist.
                new = cls([left])
                new_idx = tlist.token_index(left)
                tlist.tokens.remove(left)
                tlist.tokens.insert(new_idx, new)
                left = new
            left.tokens.extend(tokens)
            for t in tokens:
                tlist.tokens.remove(t)
            token = tlist.token_next_match(tlist.token_index(left)+1,
                                           ttype, value)
def _group_matching(tlist, start_ttype, start_value, end_ttype, end_value,
                    cls, include_semicolon=False, recurse=False):
    """Group every span between a matching start token and its balanced
    end token (e.g. ``CASE ... END``) into a *cls* instance, in place.
    """
    def _find_matching(i, tl, stt, sva, ett, eva):
        # Scan forward from index *i* (which includes the start token
        # itself, hence depth reaches 2 immediately) and return the end
        # token that balances it, or None if unbalanced.
        depth = 1
        for t in tl.tokens[i:]:
            if t.match(stt, sva):
                depth += 1
            elif t.match(ett, eva):
                depth -= 1
                if depth == 1:
                    return t
        return None
    # Optionally recurse into sublists first (bottom-up grouping).
    [_group_matching(sgroup, start_ttype, start_value, end_ttype, end_value,
                     cls, include_semicolon) for sgroup in tlist.get_sublists()
     if recurse]
    if isinstance(tlist, cls):
        # Skip the group's own opening token.
        idx = 1
    else:
        idx = 0
    token = tlist.token_next_match(idx, start_ttype, start_value)
    while token:
        tidx = tlist.token_index(token)
        end = _find_matching(tidx, tlist, start_ttype, start_value,
                             end_ttype, end_value)
        if end is None:
            # Unbalanced start token: leave it and continue after it.
            idx = tidx+1
        else:
            if include_semicolon:
                # Pull a directly following ';' into the group.
                next_ = tlist.token_next(tlist.token_index(end))
                if next_ and next_.match(T.Punctuation, ';'):
                    end = next_
            group = tlist.group_tokens(cls, tlist.tokens_between(token, end))
            # Recurse into the new group to catch nested constructs.
            _group_matching(group, start_ttype, start_value,
                            end_ttype, end_value, cls, include_semicolon)
            idx = tlist.token_index(group)+1
        token = tlist.token_next_match(idx, start_ttype, start_value)
def group_if(tlist):
    # Group ``IF ... END IF`` blocks (trailing ';' included) into If groups.
    _group_matching(tlist, T.Keyword, 'IF', T.Keyword, 'END IF', If, True)
def group_for(tlist):
    # Group ``FOR ... END LOOP`` blocks (trailing ';' included) into For groups.
    _group_matching(tlist, T.Keyword, 'FOR', T.Keyword, 'END LOOP', For, True)
def group_as(tlist):
    # Group ``<expr> AS <alias>`` pairs into Identifier groups.
    _group_left_right(tlist, T.Keyword, 'AS', Identifier)
def group_assignment(tlist):
    # Group ``<target> := <value>;`` statements into Assignment groups.
    _group_left_right(tlist, T.Assignment, ':=', Assignment,
                      include_semicolon=True)
def group_comparsion(tlist):
    # Group ``<left> <operator> <right>`` into Comparsion groups.  (The
    # misspelled class name is part of this library's public API.)
    _group_left_right(tlist, T.Operator, None, Comparsion)
def group_case(tlist):
    # Group ``CASE ... END`` expressions (nested ones too) into Case groups.
    _group_matching(tlist, T.Keyword, 'CASE', T.Keyword, 'END', Case,
                    include_semicolon=True, recurse=True)
def group_identifier(tlist):
    """Group dotted name sequences (``a.b``, ``"x".y``, ``t.*``) into
    Identifier instances, rewriting *tlist* in place."""
    def _consume_cycle(tl, i):
        # Yield tokens from index *i* while they alternate between '.'
        # punctuation and name-like tokens (the cycle starts expecting
        # a '.', since the leading name token is consumed by the caller).
        x = itertools.cycle((lambda y: y.match(T.Punctuation, '.'),
                             lambda y: y.ttype in (T.String.Symbol,
                                                   T.Name,
                                                   T.Wildcard)))
        for t in tl.tokens[i:]:
            # next(x) works on Python 2 and 3; the original x.next() was
            # Python 2 only.
            if next(x)(t):
                yield t
            else:
                # A plain return ends the generator.  The original raised
                # StopIteration here, which is an error inside generators
                # from Python 3.7 on (PEP 479).
                return
    # bottom up approach: group subgroups first
    [group_identifier(sgroup) for sgroup in tlist.get_sublists()
     if not isinstance(sgroup, Identifier)]
    # real processing
    idx = 0
    token = tlist.token_next_by_type(idx, (T.String.Symbol, T.Name))
    while token:
        # The leading name plus everything the cycle accepts after it.
        identifier_tokens = [token]+list(
            _consume_cycle(tlist,
                           tlist.token_index(token)+1))
        group = tlist.group_tokens(Identifier, identifier_tokens)
        idx = tlist.token_index(group)+1
        token = tlist.token_next_by_type(idx, (T.String.Symbol, T.Name))
def group_identifier_list(tlist):
    """Group comma-separated sequences of identifiers/literals into
    IdentifierList instances, rewriting *tlist* in place."""
    # Bottom-up: handle nested lists first.
    [group_identifier_list(sgroup) for sgroup in tlist.get_sublists()
     if not isinstance(sgroup, (Identifier, IdentifierList))]
    idx = 0
    # Allowed list items
    fend1_funcs = [lambda t: isinstance(t, Identifier),
                   lambda t: t.is_whitespace(),
                   lambda t: t.ttype == T.Wildcard,
                   lambda t: t.match(T.Keyword, 'null'),
                   lambda t: t.ttype == T.Number.Integer,
                   lambda t: t.ttype == T.String.Single,
                   lambda t: isinstance(t, Comparsion),
                   ]
    tcomma = tlist.token_next_match(idx, T.Punctuation, ',')
    # `start` marks the first token of the list currently being collected.
    start = None
    while tcomma is not None:
        before = tlist.token_prev(tcomma)
        after = tlist.token_next(tcomma)
        # Check if the tokens around tcomma belong to a list
        bpassed = apassed = False
        for func in fend1_funcs:
            if before is not None and func(before):
                bpassed = True
            if after is not None and func(after):
                apassed = True
        if not bpassed or not apassed:
            # Something's wrong here, skip ahead to next ","
            start = None
            tcomma = tlist.token_next_match(tlist.token_index(tcomma)+1,
                                            T.Punctuation, ',')
        else:
            if start is None:
                start = before
            next_ = tlist.token_next(after)
            if next_ is None or not next_.match(T.Punctuation, ','):
                # Reached the end of the list
                tokens = tlist.tokens_between(start, after)
                group = tlist.group_tokens(IdentifierList, tokens)
                start = None
                tcomma = tlist.token_next_match(tlist.token_index(group)+1,
                                                T.Punctuation, ',')
            else:
                # More items follow; continue collecting from this comma.
                tcomma = next_
def group_parenthesis(tlist):
    # Group balanced '(' ... ')' spans into Parenthesis groups.
    _group_matching(tlist, T.Punctuation, '(', T.Punctuation, ')', Parenthesis)
def group_comments(tlist):
    """Group runs of comment tokens (plus interleaved whitespace) into
    Comment instances, rewriting *tlist* in place."""
    # Bottom-up: handle nested token lists first.
    [group_comments(sgroup) for sgroup in tlist.get_sublists()
     if not isinstance(sgroup, Comment)]
    idx = 0
    token = tlist.token_next_by_type(idx, T.Comment)
    while token:
        tidx = tlist.token_index(token)
        # The run ends at the first token that is neither a comment nor
        # whitespace.
        end = tlist.token_not_matching(tidx+1,
                                       [lambda t: t.ttype in T.Comment,
                                        lambda t: t.is_whitespace()])
        if end is None:
            idx = tidx + 1
        else:
            # Group up to (but not including) the terminating token.
            eidx = tlist.token_index(end)
            grp_tokens = tlist.tokens_between(token,
                                              tlist.token_prev(eidx, False))
            group = tlist.group_tokens(Comment, grp_tokens)
            idx = tlist.token_index(group)
        token = tlist.token_next_by_type(idx, T.Comment)
def group_where(tlist):
    """Group each WHERE clause (up to the next clause keyword or end of
    statement) into a Where instance, rewriting *tlist* in place."""
    # Bottom-up: handle nested token lists first.
    [group_where(sgroup) for sgroup in tlist.get_sublists()
     if not isinstance(sgroup, Where)]
    idx = 0
    token = tlist.token_next_match(idx, T.Keyword, 'WHERE')
    # Keywords that terminate a WHERE clause.
    stopwords = ('ORDER', 'GROUP', 'LIMIT', 'UNION')
    while token:
        tidx = tlist.token_index(token)
        end = tlist.token_next_match(tidx+1, T.Keyword, stopwords)
        if end is None:
            # No stopword: the clause runs to the end of the statement.
            end = tlist.tokens[-1]
        else:
            # Stop just before the stopword.
            end = tlist.tokens[tlist.token_index(end)-1]
        group = tlist.group_tokens(Where, tlist.tokens_between(token, end))
        idx = tlist.token_index(group)
        token = tlist.token_next_match(idx, T.Keyword, 'WHERE')
def group_aliased(tlist):
    """Merge an Identifier immediately followed by another Identifier
    (an implicit alias, e.g. ``tbl t``) into the first one, in place."""
    # Bottom-up: handle nested token lists first.
    [group_aliased(sgroup) for sgroup in tlist.get_sublists()
     if not isinstance(sgroup, Identifier)]
    idx = 0
    token = tlist.token_next_by_instance(idx, Identifier)
    while token:
        next_ = tlist.token_next(tlist.token_index(token))
        if next_ is not None and isinstance(next_, Identifier):
            # Move the alias (and anything between) into the identifier.
            grp = tlist.tokens_between(token, next_)[1:]
            token.tokens.extend(grp)
            for t in grp:
                tlist.tokens.remove(t)
        idx = tlist.token_index(token)+1
        token = tlist.token_next_by_instance(idx, Identifier)
def group_typecasts(tlist):
    # Group ``<expr>::<type>`` casts into Identifier groups.
    _group_left_right(tlist, T.Punctuation, '::', Identifier)
def group(tlist):
    """Run every grouping pass over *tlist* in pipeline order."""
    # The order is significant: later passes depend on the groups that
    # earlier passes have already built.
    passes = (
        group_parenthesis,
        group_comments,
        group_where,
        group_case,
        group_identifier,
        group_typecasts,
        group_as,
        group_aliased,
        group_assignment,
        group_comparsion,
        group_identifier_list,
        group_if,
        group_for,
    )
    for grouper in passes:
        grouper(tlist)
|
unknown
|
codeparrot/codeparrot-clean
| ||
'use strict';
// Benchmark: cost of constructing bare EventTarget instances.
const common = require('../common.js');
const assert = require('node:assert');
// --expose-internals is required so main() can require the internal
// event_target module directly.
const bench = common.createBenchmark(main, {
  n: [1e6],
}, { flags: ['--expose-internals'] });
function main({ n }) {
  const { EventTarget } = require('internal/event_target');
  let target;
  bench.start();
  // Time n back-to-back constructions.
  for (let i = 0; i < n; i++) {
    target = new EventTarget();
  }
  bench.end(n);
  // Avoid V8 deadcode (elimination)
  assert.ok(target);
}
|
javascript
|
github
|
https://github.com/nodejs/node
|
benchmark/events/eventtarget-creation.js
|
# Copyright (c) Aaron Gallagher <_@habnab.it>
# See COPYING for details.
import collections
import cProfile
import inspect
import sys
from twisted.internet import defer
from twisted.python import log
# Aggregate record for one traced function: its callee map and total time.
FunctionData = collections.namedtuple('FunctionData', ['calls', 'time'])
# One caller->callee edge: call count and accumulated time.
FunctionCall = collections.namedtuple('FunctionCall', ['count', 'time'])
# Default edge value used before a callee has been recorded.
EMPTY_CALL = FunctionCall(0, 0)
class Function(collections.namedtuple('Function', ['filename', 'func'])):
    """A (filename, function-name) pair identifying a traced function."""

    @classmethod
    def of_frame(cls, frame):
        """Build a Function from a stack frame's code object."""
        code = frame.f_code
        return cls(code.co_filename, code.co_name)
class FakeFrame(object):
    """Minimal stand-in for a stack frame, carrying only the two
    attributes the tracer reads: ``f_code`` and ``f_back``."""

    def __init__(self, code, back):
        self.f_code, self.f_back = code, back
class Tracer(object):
    """
    A tracer for Deferred-returning functions.
    The general idea is that if a function returns a Deferred, said Deferred
    will have a callback attached to it for timing how long it takes before the
    Deferred fires. Then, that time is recorded along with the function and all
    of its callers.
    """
    def __init__(self, reactor=None):
        # Default to the global Twisted reactor if none is supplied.
        if reactor is None:
            from twisted.internet import reactor
        self._reactor = reactor
        # (frame, deferred) -> time the deferred was returned.
        self._deferreds = {}
        # Function -> FunctionData accumulated so far.
        self._function_data = {}
        # A previously installed profile hook we forward events to.
        self._wrapped_profiler = None
    def _trace(self, frame, event, arg):
        # Profile-hook callback: watch 'return' events for Deferreds.
        if self._wrapped_profiler is not None:
            self._wrapped_profiler(frame, event, arg)
        if event != 'return':
            return
        # Don't care about generators; inlineCallbacks is handled separately.
        if frame.f_code.co_flags & inspect.CO_GENERATOR:
            return
        # If it's not a deferred, we don't care either.
        if not isinstance(arg, defer.Deferred):
            return
        # Tracing functions from twisted.internet.defer adds a lot of noise, so
        # don't do that except for unwindGenerator.
        if frame.f_globals.get('__name__') == 'twisted.internet.defer':
            # Detect when unwindGenerator returns. unwindGenerator is part of
            # the inlineCallbacks implementation. If unwindGenerator is
            # returning, it means that the Deferred being returned is the
            # Deferred that will be returned from the wrapped function. Yank
            # the wrapped function out and fake a call stack that makes it look
            # like unwindGenerator isn't involved at all and the wrapped
            # function is being called directly. This /does/ involve Twisted
            # implementation details, but as far back as twisted 2.5.0 (when
            # inlineCallbacks was introduced), the name 'unwindGenerator' and
            # the local 'f' are the same. If this ever changes in the future,
            # I'll have to update this code.
            if frame.f_code.co_name == 'unwindGenerator':
                wrapped_func = frame.f_locals['f']
                # NOTE(review): func_code is the Python 2 spelling
                # (__code__ on Python 3); this module appears py2-only.
                frame = FakeFrame(wrapped_func.func_code, frame.f_back)
            else:
                return
        # Remember when this Deferred was handed out and time it on fire.
        key = frame, arg
        self._deferreds[key] = self._reactor.seconds()
        arg.addBoth(self._deferred_fired, key)
    def _get_function(self, frame):
        # Return (Function, FunctionData) for the frame, creating an
        # empty record on first sight.
        func = Function.of_frame(frame)
        data = self._function_data.get(func)
        if data is None:
            data = self._function_data[func] = FunctionData({}, 0)
        return func, data
    def _deferred_fired(self, result, key):
        # Callback attached in _trace: record elapsed time for *key*.
        fired_at = self._reactor.seconds()
        returned_at = self._deferreds.pop(key, None)
        if returned_at is None:
            return
        # Elapsed time in integer microseconds (seconds * 1e6).
        delta = int((fired_at - returned_at) * 1000000)
        frame, _ = key
        try:
            self._record_timing(delta, frame)
        except Exception:
            # Never let bookkeeping errors break the Deferred chain.
            log.err(None, 'an error occurred recording timing information')
        # Pass the result through unchanged so the chain is unaffected.
        return result
    def _record_timing(self, delta, frame):
        # Charge *delta* to the returning function itself...
        frame_func, frame_data = self._get_function(frame)
        self._function_data[frame_func] = frame_data._replace(
            time=frame_data.time + delta)
        # ...and to every caller->callee edge up the stack.
        while frame.f_back is not None:
            caller = frame.f_back
            frame_func = Function.of_frame(frame)
            _, caller_data = self._get_function(caller)
            call = caller_data.calls.get(frame_func, EMPTY_CALL)
            caller_data.calls[frame_func] = call._replace(
                count=call.count + 1, time=call.time + delta)
            frame = caller
    def install(self):
        """
        Install this tracer as a global `profile hook
        <https://docs.python.org/2/library/sys.html#sys.setprofile>`_.
        The old profile hook, if one is set, will continue to be called by this
        tracer.
        """
        extant_profiler = sys.getprofile()
        if isinstance(extant_profiler, cProfile.Profile):
            raise RuntimeError(
                "the pure-python Tracer is unable to compose over cProfile's "
                "profile function; you must disable cProfile before "
                "installing this Tracer.")
        self._wrapped_profiler = extant_profiler
        sys.setprofile(self._trace)
    def uninstall(self):
        """
        Deactivate this tracer.
        If another profile hook was installed after this tracer was installed,
        nothing will happen. If a different profile hook was installed prior to
        calling ``install()``, it will be restored.
        """
        if sys.getprofile() == self._trace:
            sys.setprofile(self._wrapped_profiler)
    def write_data(self, fobj):
        """
        Write profiling data in `callgrind format
        <http://valgrind.org/docs/manual/cl-format.html>`_ to an open file
        object.
        The file object will not be closed.
        """
        # NOTE(review): _deferred_fired computes deltas in *microseconds*
        # (x 1e6) while this header declares Nanoseconds — one of the two
        # looks wrong; confirm before trusting absolute numbers.
        fobj.write('events: Nanoseconds\n')
        # NOTE(review): iteritems is Python 2 only.
        for func, data in sorted(self._function_data.iteritems()):
            fobj.write('fn={0.func} {0.filename}\n'.format(func))
            fobj.write('0 {0.time}\n'.format(data))
            for callee, call in sorted(data.calls.iteritems()):
                fobj.write('cfn={0.func} {0.filename}\n'.format(callee))
                fobj.write('calls={0.count} 0\n0 {0.time}\n'.format(call))
            fobj.write('\n')
|
unknown
|
codeparrot/codeparrot-clean
| ||
# coding: utf-8
#
# Copyright 2014 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
from core.platform import models
from core.tests import test_utils
# import_models returns a list; unpack the single requested module.
(user_models,) = models.Registry.import_models([models.NAMES.user])
class ExplorationUserDataModelTest(test_utils.GenericTestBase):
    """Tests for the ExplorationUserDataModel class."""
    # Fixed timestamp reused for rated_on / draft-updated fields.
    DATETIME_OBJECT = datetime.datetime.strptime('2016-02-16', '%Y-%m-%d')
    USER_ID = 'user_id'
    EXP_ID_ONE = 'exp_id_one'
    EXP_ID_TWO = 'exp_id_two'
    def setUp(self):
        """Seed one model instance keyed '<user_id>.<exploration_id>'."""
        super(ExplorationUserDataModelTest, self).setUp()
        user_models.ExplorationUserDataModel(
            id='%s.%s' % (self.USER_ID, self.EXP_ID_ONE), user_id=self.USER_ID,
            exploration_id=self.EXP_ID_ONE, rating=2,
            rated_on=self.DATETIME_OBJECT,
            draft_change_list={'new_content': {}},
            draft_change_list_last_updated=self.DATETIME_OBJECT,
            draft_change_list_exp_version=3).put()
    def test_create_success(self):
        """create() stores a retrievable instance under the composite id."""
        user_models.ExplorationUserDataModel.create(
            self.USER_ID, self.EXP_ID_TWO).put()
        retrieved_object = user_models.ExplorationUserDataModel.get_by_id(
            '%s.%s' % (self.USER_ID, self.EXP_ID_TWO))
        self.assertEqual(retrieved_object.user_id, self.USER_ID)
        self.assertEqual(retrieved_object.exploration_id, self.EXP_ID_TWO)
    def test_get_success(self):
        """get() returns the seeded instance with all fields intact."""
        retrieved_object = user_models.ExplorationUserDataModel.get(
            self.USER_ID, self.EXP_ID_ONE)
        self.assertEqual(retrieved_object.user_id, self.USER_ID)
        self.assertEqual(retrieved_object.exploration_id, self.EXP_ID_ONE)
        self.assertEqual(retrieved_object.rating, 2)
        self.assertEqual(retrieved_object.rated_on, self.DATETIME_OBJECT)
        self.assertEqual(
            retrieved_object.draft_change_list, {'new_content': {}})
        self.assertEqual(retrieved_object.draft_change_list_last_updated,
                         self.DATETIME_OBJECT)
        self.assertEqual(retrieved_object.draft_change_list_exp_version, 3)
    def test_get_failure(self):
        """get() yields None for an unknown exploration id."""
        retrieved_object = user_models.ExplorationUserDataModel.get(
            self.USER_ID, 'unknown_exp_id')
        self.assertEqual(retrieved_object, None)
class UserQueryModelTests(test_utils.GenericTestBase):
    """Tests for UserQueryModel."""

    def test_instance_stores_correct_data(self):
        """A stored UserQueryModel reports back every field it was given."""
        query_id = 'qid'
        # Field name -> value the model is constructed with; the same
        # table drives the assertions below.
        field_values = {
            'submitter_id': 'submitter',
            'inactive_in_last_n_days': 5,
            'created_at_least_n_exps': 1,
            'created_fewer_than_n_exps': 3,
            'edited_at_least_n_exps': 2,
            'edited_fewer_than_n_exps': 5,
            'has_not_logged_in_for_n_days': 10,
        }
        user_models.UserQueryModel(id=query_id, **field_values).put()
        query_model = user_models.UserQueryModel.get(query_id)
        for field_name, expected in field_values.items():
            self.assertEqual(getattr(query_model, field_name), expected)
|
unknown
|
codeparrot/codeparrot-clean
| ||
# Copyright (c) 2013 OpenStack Foundation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import contextlib
import copy
import datetime
import mock
from oslo.config import cfg
from webob import exc
from neutron.api import extensions
from neutron.api.rpc.agentnotifiers import dhcp_rpc_agent_api
from neutron.api.rpc.handlers import dhcp_rpc
from neutron.api.rpc.handlers import l3_rpc
from neutron.api.v2 import attributes
from neutron.common import constants
from neutron import context
from neutron.db import agents_db
from neutron.db import l3_agentschedulers_db
from neutron.extensions import agent
from neutron.extensions import dhcpagentscheduler
from neutron.extensions import l3agentscheduler
from neutron import manager
from neutron.openstack.common import timeutils
from neutron.openstack.common import uuidutils
from neutron.plugins.common import constants as service_constants
from neutron.tests import fake_notifier
from neutron.tests.unit import test_agent_ext_plugin
from neutron.tests.unit import test_db_plugin as test_plugin
from neutron.tests.unit import test_extensions
from neutron.tests.unit import test_l3_plugin
from neutron.tests.unit import testlib_api
from neutron import wsgi
# Host names used when registering fake L3/DHCP agents in these tests.
L3_HOSTA = 'hosta'
DHCP_HOSTA = 'hosta'
L3_HOSTB = 'hostb'
DHCP_HOSTC = 'hostc'
class AgentSchedulerTestMixIn(object):
    """Helpers for exercising the agent-scheduler REST extensions:
    request builders, list/add/remove wrappers for agent<->resource
    bindings, and fake-agent registration utilities."""
    def _request_list(self, path, admin_context=True,
                      expected_code=exc.HTTPOk.code):
        # GET *path*, assert the status code, and return the parsed body.
        req = self._path_req(path, admin_context=admin_context)
        res = req.get_response(self.ext_api)
        self.assertEqual(res.status_int, expected_code)
        return self.deserialize(self.fmt, res)
    def _path_req(self, path, method='GET', data=None,
                  query_string=None,
                  admin_context=True):
        # Build a webob request; non-admin requests get a plain tenant
        # context so policy checks are exercised.
        content_type = 'application/%s' % self.fmt
        body = None
        if data is not None:  # empty dict is valid
            body = wsgi.Serializer().serialize(data, content_type)
        if admin_context:
            return testlib_api.create_request(
                path, body, content_type, method, query_string=query_string)
        else:
            return testlib_api.create_request(
                path, body, content_type, method, query_string=query_string,
                context=context.Context('', 'tenant_id'))
    def _path_create_request(self, path, data, admin_context=True):
        return self._path_req(path, method='POST', data=data,
                              admin_context=admin_context)
    def _path_show_request(self, path, admin_context=True):
        return self._path_req(path, admin_context=admin_context)
    def _path_delete_request(self, path, admin_context=True):
        return self._path_req(path, method='DELETE',
                              admin_context=admin_context)
    def _path_update_request(self, path, data, admin_context=True):
        return self._path_req(path, method='PUT', data=data,
                              admin_context=admin_context)
    def _list_routers_hosted_by_l3_agent(self, agent_id,
                                         expected_code=exc.HTTPOk.code,
                                         admin_context=True):
        # GET /agents/<id>/l3-routers.<fmt>
        path = "/agents/%s/%s.%s" % (agent_id,
                                     l3agentscheduler.L3_ROUTERS,
                                     self.fmt)
        return self._request_list(path, expected_code=expected_code,
                                  admin_context=admin_context)
    def _list_networks_hosted_by_dhcp_agent(self, agent_id,
                                            expected_code=exc.HTTPOk.code,
                                            admin_context=True):
        # GET /agents/<id>/dhcp-networks.<fmt>
        path = "/agents/%s/%s.%s" % (agent_id,
                                     dhcpagentscheduler.DHCP_NETS,
                                     self.fmt)
        return self._request_list(path, expected_code=expected_code,
                                  admin_context=admin_context)
    def _list_l3_agents_hosting_router(self, router_id,
                                       expected_code=exc.HTTPOk.code,
                                       admin_context=True):
        # GET /routers/<id>/l3-agents.<fmt>
        path = "/routers/%s/%s.%s" % (router_id,
                                      l3agentscheduler.L3_AGENTS,
                                      self.fmt)
        return self._request_list(path, expected_code=expected_code,
                                  admin_context=admin_context)
    def _list_dhcp_agents_hosting_network(self, network_id,
                                          expected_code=exc.HTTPOk.code,
                                          admin_context=True):
        # GET /networks/<id>/dhcp-agents.<fmt>
        path = "/networks/%s/%s.%s" % (network_id,
                                       dhcpagentscheduler.DHCP_AGENTS,
                                       self.fmt)
        return self._request_list(path, expected_code=expected_code,
                                  admin_context=admin_context)
    def _add_router_to_l3_agent(self, id, router_id,
                                expected_code=exc.HTTPCreated.code,
                                admin_context=True):
        # POST the router id onto the agent's router collection.
        path = "/agents/%s/%s.%s" % (id,
                                     l3agentscheduler.L3_ROUTERS,
                                     self.fmt)
        req = self._path_create_request(path,
                                        {'router_id': router_id},
                                        admin_context=admin_context)
        res = req.get_response(self.ext_api)
        self.assertEqual(res.status_int, expected_code)
    def _add_network_to_dhcp_agent(self, id, network_id,
                                   expected_code=exc.HTTPCreated.code,
                                   admin_context=True):
        # POST the network id onto the agent's network collection.
        path = "/agents/%s/%s.%s" % (id,
                                     dhcpagentscheduler.DHCP_NETS,
                                     self.fmt)
        req = self._path_create_request(path,
                                        {'network_id': network_id},
                                        admin_context=admin_context)
        res = req.get_response(self.ext_api)
        self.assertEqual(res.status_int, expected_code)
    def _remove_network_from_dhcp_agent(self, id, network_id,
                                        expected_code=exc.HTTPNoContent.code,
                                        admin_context=True):
        # DELETE the agent<->network binding.
        path = "/agents/%s/%s/%s.%s" % (id,
                                        dhcpagentscheduler.DHCP_NETS,
                                        network_id,
                                        self.fmt)
        req = self._path_delete_request(path,
                                        admin_context=admin_context)
        res = req.get_response(self.ext_api)
        self.assertEqual(res.status_int, expected_code)
    def _remove_router_from_l3_agent(self, id, router_id,
                                     expected_code=exc.HTTPNoContent.code,
                                     admin_context=True):
        # DELETE the agent<->router binding.
        path = "/agents/%s/%s/%s.%s" % (id,
                                        l3agentscheduler.L3_ROUTERS,
                                        router_id,
                                        self.fmt)
        req = self._path_delete_request(path, admin_context=admin_context)
        res = req.get_response(self.ext_api)
        self.assertEqual(res.status_int, expected_code)
    def _assert_notify(self, notifications, expected_event_type):
        # Assert one of the captured notifications has the given type.
        event_types = [event['event_type'] for event in notifications]
        self.assertIn(expected_event_type, event_types)
    def _register_one_agent_state(self, agent_state):
        # Report a fake agent's state through the RPC callback, as a live
        # agent would.
        callback = agents_db.AgentExtRpcCallback()
        callback.report_state(self.adminContext,
                              agent_state={'agent_state': agent_state},
                              time=timeutils.strtime())
    def _disable_agent(self, agent_id, admin_state_up=False):
        # PUT admin_state_up on the agent (False disables it).
        new_agent = {}
        new_agent['agent'] = {}
        new_agent['agent']['admin_state_up'] = admin_state_up
        self._update('agents', agent_id, new_agent)
    def _get_agent_id(self, agent_type, host):
        # Return the id of the agent matching (type, host); implicitly
        # returns None when no agent matches.
        agents = self._list_agents()
        for agent_data in agents['agents']:
            if (agent_data['agent_type'] == agent_type and
                agent_data['host'] == host):
                return agent_data['id']
class OvsAgentSchedulerTestCaseBase(test_l3_plugin.L3NatTestCaseMixin,
                                    test_agent_ext_plugin.AgentDBTestMixIn,
                                    AgentSchedulerTestMixIn,
                                    test_plugin.NeutronDbPluginV2TestCase):
    """Base fixture for OVS scheduler tests: loads the OVS plugin, wires
    up the extensions middleware, and patches the scheduler notifiers."""
    fmt = 'json'
    plugin_str = ('neutron.plugins.openvswitch.'
                  'ovs_neutron_plugin.OVSNeutronPluginV2')
    l3_plugin = None
    def setUp(self):
        # Save the global RESOURCE_ATTRIBUTE_MAP before loading plugin
        self.saved_attr_map = {}
        for resource, attrs in attributes.RESOURCE_ATTRIBUTE_MAP.iteritems():
            self.saved_attr_map[resource] = attrs.copy()
        if self.l3_plugin:
            service_plugins = {'l3_plugin_name': self.l3_plugin}
        else:
            service_plugins = None
        super(OvsAgentSchedulerTestCaseBase, self).setUp(
            self.plugin_str, service_plugins=service_plugins)
        ext_mgr = extensions.PluginAwareExtensionManager.get_instance()
        self.ext_api = test_extensions.setup_extensions_middleware(ext_mgr)
        self.adminContext = context.get_admin_context()
        # Add the resources to the global attribute map
        # This is done here as the setup process won't
        # initialize the main API router which extends
        # the global attribute map
        attributes.RESOURCE_ATTRIBUTE_MAP.update(
            agent.RESOURCE_ATTRIBUTE_MAP)
        self.addCleanup(self.restore_attribute_map)
        self.l3agentscheduler_dbMinxin = (
            manager.NeutronManager.get_service_plugins().get(
                service_constants.L3_ROUTER_NAT))
        self.l3_notify_p = mock.patch(
            'neutron.extensions.l3agentscheduler.notify')
        self.patched_l3_notify = self.l3_notify_p.start()
        # Patch the periodic agent status check so tests don't spawn the
        # background loop.  The original code started self.l3_notify_p a
        # second time here (copy-paste bug), leaving l3_periodic_p never
        # started; it also pointed at the class without its module path.
        self.l3_periodic_p = mock.patch(
            'neutron.db.l3_agentschedulers_db.L3AgentSchedulerDbMixin.'
            'start_periodic_agent_status_check')
        self.patched_l3_periodic = self.l3_periodic_p.start()
        self.dhcp_notify_p = mock.patch(
            'neutron.extensions.dhcpagentscheduler.notify')
        self.patched_dhcp_notify = self.dhcp_notify_p.start()
    def restore_attribute_map(self):
        # Restore the original RESOURCE_ATTRIBUTE_MAP
        attributes.RESOURCE_ATTRIBUTE_MAP = self.saved_attr_map
class OvsAgentSchedulerTestCase(OvsAgentSchedulerTestCaseBase):
    def test_report_states(self):
        # Registering the standard fake agents should expose all four
        # through the agents API.
        self._register_agent_states()
        agents = self._list_agents()
        self.assertEqual(4, len(agents['agents']))
    def test_network_scheduling_on_network_creation(self):
        # Creating a network alone must not bind it to any DHCP agent.
        self._register_agent_states()
        with self.network() as net:
            dhcp_agents = self._list_dhcp_agents_hosting_network(
                net['network']['id'])
            self.assertEqual(0, len(dhcp_agents['agents']))
    def test_network_auto_schedule_with_disabled(self):
        # With the first agent administratively disabled, polling for
        # active networks should schedule both networks onto the second.
        cfg.CONF.set_override('allow_overlapping_ips', True)
        with contextlib.nested(self.subnet(),
                               self.subnet()):
            dhcp_rpc_cb = dhcp_rpc.DhcpRpcCallback()
            self._register_agent_states()
            hosta_id = self._get_agent_id(constants.AGENT_TYPE_DHCP,
                                          DHCP_HOSTA)
            hostc_id = self._get_agent_id(constants.AGENT_TYPE_DHCP,
                                          DHCP_HOSTC)
            self._disable_agent(hosta_id)
            dhcp_rpc_cb.get_active_networks(self.adminContext, host=DHCP_HOSTA)
            # second agent will host all the networks since first is disabled.
            dhcp_rpc_cb.get_active_networks(self.adminContext, host=DHCP_HOSTC)
            networks = self._list_networks_hosted_by_dhcp_agent(hostc_id)
            num_hostc_nets = len(networks['networks'])
            networks = self._list_networks_hosted_by_dhcp_agent(hosta_id)
            num_hosta_nets = len(networks['networks'])
        self.assertEqual(0, num_hosta_nets)
        self.assertEqual(2, num_hostc_nets)
    def test_network_auto_schedule_with_no_dhcp(self):
        # Networks whose subnets have DHCP disabled must not be scheduled
        # to either agent.
        cfg.CONF.set_override('allow_overlapping_ips', True)
        with contextlib.nested(self.subnet(enable_dhcp=False),
                               self.subnet(enable_dhcp=False)):
            dhcp_rpc_cb = dhcp_rpc.DhcpRpcCallback()
            self._register_agent_states()
            hosta_id = self._get_agent_id(constants.AGENT_TYPE_DHCP,
                                          DHCP_HOSTA)
            hostc_id = self._get_agent_id(constants.AGENT_TYPE_DHCP,
                                          DHCP_HOSTC)
            self._disable_agent(hosta_id)
            dhcp_rpc_cb.get_active_networks(self.adminContext, host=DHCP_HOSTA)
            dhcp_rpc_cb.get_active_networks(self.adminContext, host=DHCP_HOSTC)
            networks = self._list_networks_hosted_by_dhcp_agent(hostc_id)
            num_hostc_nets = len(networks['networks'])
            networks = self._list_networks_hosted_by_dhcp_agent(hosta_id)
            num_hosta_nets = len(networks['networks'])
        self.assertEqual(0, num_hosta_nets)
        self.assertEqual(0, num_hostc_nets)
    def test_network_auto_schedule_with_multiple_agents(self):
        # With dhcp_agents_per_network=2, both agents should end up
        # hosting both networks.
        cfg.CONF.set_override('dhcp_agents_per_network', 2)
        cfg.CONF.set_override('allow_overlapping_ips', True)
        with contextlib.nested(self.subnet(),
                               self.subnet()):
            dhcp_rpc_cb = dhcp_rpc.DhcpRpcCallback()
            self._register_agent_states()
            hosta_id = self._get_agent_id(constants.AGENT_TYPE_DHCP,
                                          DHCP_HOSTA)
            hostc_id = self._get_agent_id(constants.AGENT_TYPE_DHCP,
                                          DHCP_HOSTC)
            dhcp_rpc_cb.get_active_networks(self.adminContext, host=DHCP_HOSTA)
            dhcp_rpc_cb.get_active_networks(self.adminContext, host=DHCP_HOSTC)
            networks = self._list_networks_hosted_by_dhcp_agent(hostc_id)
            num_hostc_nets = len(networks['networks'])
            networks = self._list_networks_hosted_by_dhcp_agent(hosta_id)
            num_hosta_nets = len(networks['networks'])
        self.assertEqual(2, num_hosta_nets)
        self.assertEqual(2, num_hostc_nets)
    def test_network_auto_schedule_restart_dhcp_agent(self):
        # Polling twice from the same agent (an agent restart) must not
        # create a duplicate binding, even with dhcp_agents_per_network=2.
        cfg.CONF.set_override('dhcp_agents_per_network', 2)
        with self.subnet() as sub1:
            dhcp_rpc_cb = dhcp_rpc.DhcpRpcCallback()
            self._register_agent_states()
            dhcp_rpc_cb.get_active_networks(self.adminContext, host=DHCP_HOSTA)
            dhcp_rpc_cb.get_active_networks(self.adminContext, host=DHCP_HOSTA)
            dhcp_agents = self._list_dhcp_agents_hosting_network(
                sub1['subnet']['network_id'])
            self.assertEqual(1, len(dhcp_agents['agents']))
    def test_network_auto_schedule_with_hosted(self):
        # one agent hosts all the networks, other hosts none
        cfg.CONF.set_override('allow_overlapping_ips', True)
        with contextlib.nested(self.subnet(),
                               self.subnet()) as (sub1, sub2):
            dhcp_rpc_cb = dhcp_rpc.DhcpRpcCallback()
            self._register_agent_states()
            dhcp_rpc_cb.get_active_networks(self.adminContext, host=DHCP_HOSTA)
            # second agent will not host the network since first has got it.
            dhcp_rpc_cb.get_active_networks(self.adminContext, host=DHCP_HOSTC)
            dhcp_agents = self._list_dhcp_agents_hosting_network(
                sub1['subnet']['network_id'])
            hosta_id = self._get_agent_id(constants.AGENT_TYPE_DHCP,
                                          DHCP_HOSTA)
            hostc_id = self._get_agent_id(constants.AGENT_TYPE_DHCP,
                                          DHCP_HOSTC)
            hosta_nets = self._list_networks_hosted_by_dhcp_agent(hosta_id)
            num_hosta_nets = len(hosta_nets['networks'])
            hostc_nets = self._list_networks_hosted_by_dhcp_agent(hostc_id)
            num_hostc_nets = len(hostc_nets['networks'])
            # The first poller took both networks; the later one got none.
            self.assertEqual(2, num_hosta_nets)
            self.assertEqual(0, num_hostc_nets)
            self.assertEqual(1, len(dhcp_agents['agents']))
            self.assertEqual(DHCP_HOSTA, dhcp_agents['agents'][0]['host'])
    def test_network_auto_schedule_with_hosted_2(self):
        """Each of two networks lands on the only alive agent at its time.

        Agent A is registered, takes the first network, then is disabled;
        agent C is registered next and takes the second network, so each
        agent ends up hosting exactly one network.
        """
        # one agent hosts one network
        dhcp_rpc_cb = dhcp_rpc.DhcpRpcCallback()
        dhcp_hosta = {
            'binary': 'neutron-dhcp-agent',
            'host': DHCP_HOSTA,
            'topic': 'DHCP_AGENT',
            'configurations': {'dhcp_driver': 'dhcp_driver',
                               'use_namespaces': True,
                               },
            'agent_type': constants.AGENT_TYPE_DHCP}
        dhcp_hostc = copy.deepcopy(dhcp_hosta)
        dhcp_hostc['host'] = DHCP_HOSTC
        cfg.CONF.set_override('allow_overlapping_ips', True)
        with self.subnet() as sub1:
            self._register_one_agent_state(dhcp_hosta)
            dhcp_rpc_cb.get_active_networks(self.adminContext, host=DHCP_HOSTA)
            hosta_id = self._get_agent_id(constants.AGENT_TYPE_DHCP,
                                          DHCP_HOSTA)
            # Take agent A out of scheduling before the second network exists.
            self._disable_agent(hosta_id, admin_state_up=False)
            with self.subnet() as sub2:
                self._register_one_agent_state(dhcp_hostc)
                dhcp_rpc_cb.get_active_networks(self.adminContext,
                                                host=DHCP_HOSTC)
                dhcp_agents_1 = self._list_dhcp_agents_hosting_network(
                    sub1['subnet']['network_id'])
                dhcp_agents_2 = self._list_dhcp_agents_hosting_network(
                    sub2['subnet']['network_id'])
                hosta_nets = self._list_networks_hosted_by_dhcp_agent(hosta_id)
                num_hosta_nets = len(hosta_nets['networks'])
                hostc_id = self._get_agent_id(
                    constants.AGENT_TYPE_DHCP,
                    DHCP_HOSTC)
                hostc_nets = self._list_networks_hosted_by_dhcp_agent(hostc_id)
                num_hostc_nets = len(hostc_nets['networks'])
                self.assertEqual(1, num_hosta_nets)
                self.assertEqual(1, num_hostc_nets)
                self.assertEqual(1, len(dhcp_agents_1['agents']))
                self.assertEqual(1, len(dhcp_agents_2['agents']))
                self.assertEqual(DHCP_HOSTA, dhcp_agents_1['agents'][0]['host'])
                self.assertEqual(DHCP_HOSTC, dhcp_agents_2['agents'][0]['host'])
    def test_network_scheduling_on_port_creation(self):
        """A network is scheduled lazily, at port creation time."""
        with self.subnet() as subnet:
            dhcp_agents = self._list_dhcp_agents_hosting_network(
                subnet['subnet']['network_id'])
            # No agents bound yet: subnet creation alone does not schedule.
            result0 = len(dhcp_agents['agents'])
            self._register_agent_states()
            with self.port(subnet=subnet,
                           device_owner="compute:test:" + DHCP_HOSTA) as port:
                dhcp_agents = self._list_dhcp_agents_hosting_network(
                    port['port']['network_id'])
                result1 = len(dhcp_agents['agents'])
        self.assertEqual(0, result0)
        self.assertEqual(1, result1)
    def test_network_ha_scheduling_on_port_creation(self):
        """With dhcp_agents_per_network=2, port creation binds two agents."""
        cfg.CONF.set_override('dhcp_agents_per_network', 2)
        with self.subnet() as subnet:
            dhcp_agents = self._list_dhcp_agents_hosting_network(
                subnet['subnet']['network_id'])
            # Nothing scheduled before any port exists.
            result0 = len(dhcp_agents['agents'])
            self._register_agent_states()
            with self.port(subnet=subnet,
                           device_owner="compute:test:" + DHCP_HOSTA) as port:
                dhcp_agents = self._list_dhcp_agents_hosting_network(
                    port['port']['network_id'])
                result1 = len(dhcp_agents['agents'])
        self.assertEqual(0, result0)
        self.assertEqual(2, result1)
    def test_network_ha_scheduling_on_port_creation_with_new_agent(self):
        """A late-registered agent tops the network up to the HA target.

        dhcp_agents_per_network is 3 but only two DHCP agents exist at the
        first port creation (result1 == 2); after a third agent registers,
        the next port creation brings the binding count to 3.
        """
        cfg.CONF.set_override('dhcp_agents_per_network', 3)
        with self.subnet() as subnet:
            dhcp_agents = self._list_dhcp_agents_hosting_network(
                subnet['subnet']['network_id'])
            result0 = len(dhcp_agents['agents'])
            self._register_agent_states()
            with self.port(subnet=subnet,
                           device_owner="compute:test:" + DHCP_HOSTA) as port:
                dhcp_agents = self._list_dhcp_agents_hosting_network(
                    port['port']['network_id'])
                result1 = len(dhcp_agents['agents'])
            # Third agent appears only now.
            self._register_one_dhcp_agent()
            with self.port(subnet=subnet,
                           device_owner="compute:test:" + DHCP_HOSTA) as port:
                dhcp_agents = self._list_dhcp_agents_hosting_network(
                    port['port']['network_id'])
                result2 = len(dhcp_agents['agents'])
        self.assertEqual(0, result0)
        self.assertEqual(2, result1)
        self.assertEqual(3, result2)
    def test_network_scheduler_with_disabled_agent(self):
        """Networks are not scheduled to an administratively disabled agent."""
        dhcp_hosta = {
            'binary': 'neutron-dhcp-agent',
            'host': DHCP_HOSTA,
            'topic': 'DHCP_AGENT',
            'configurations': {'dhcp_driver': 'dhcp_driver',
                               'use_namespaces': True,
                               },
            'agent_type': constants.AGENT_TYPE_DHCP}
        self._register_one_agent_state(dhcp_hosta)
        with self.port() as port1:
            dhcp_agents = self._list_dhcp_agents_hosting_network(
                port1['port']['network_id'])
            self.assertEqual(1, len(dhcp_agents['agents']))
        agents = self._list_agents()
        self._disable_agent(agents['agents'][0]['id'])
        with self.port() as port2:
            # A network created after the agent is disabled gets no agent.
            dhcp_agents = self._list_dhcp_agents_hosting_network(
                port2['port']['network_id'])
            self.assertEqual(0, len(dhcp_agents['agents']))
    def test_network_scheduler_with_down_agent(self):
        """Scheduling honours the heartbeat-based liveness check."""
        dhcp_hosta = {
            'binary': 'neutron-dhcp-agent',
            'host': DHCP_HOSTA,
            'topic': 'DHCP_AGENT',
            'configurations': {'dhcp_driver': 'dhcp_driver',
                               'use_namespaces': True,
                               },
            'agent_type': constants.AGENT_TYPE_DHCP}
        self._register_one_agent_state(dhcp_hosta)
        is_agent_down_str = 'neutron.db.agents_db.AgentDbMixin.is_agent_down'
        with mock.patch(is_agent_down_str) as mock_is_agent_down:
            # Agent reported alive: the new network is scheduled to it.
            mock_is_agent_down.return_value = False
            with self.port() as port:
                dhcp_agents = self._list_dhcp_agents_hosting_network(
                    port['port']['network_id'])
                self.assertEqual(1, len(dhcp_agents['agents']))
        with mock.patch(is_agent_down_str) as mock_is_agent_down:
            # Agent reported dead: the new network stays unscheduled.
            mock_is_agent_down.return_value = True
            with self.port() as port:
                dhcp_agents = self._list_dhcp_agents_hosting_network(
                    port['port']['network_id'])
                self.assertEqual(0, len(dhcp_agents['agents']))
    def test_network_scheduler_with_hosted_network(self):
        """No rescheduling when the scheduler believes agents already host it.

        get_dhcp_agents_hosting_networks is mocked to claim every agent
        already hosts the new network, so port creation must not create a
        real binding.
        """
        plugin = manager.NeutronManager.get_plugin()
        dhcp_hosta = {
            'binary': 'neutron-dhcp-agent',
            'host': DHCP_HOSTA,
            'topic': 'DHCP_AGENT',
            'configurations': {'dhcp_driver': 'dhcp_driver',
                               'use_namespaces': True,
                               },
            'agent_type': constants.AGENT_TYPE_DHCP}
        self._register_one_agent_state(dhcp_hosta)
        with self.port() as port1:
            dhcp_agents = self._list_dhcp_agents_hosting_network(
                port1['port']['network_id'])
            self.assertEqual(1, len(dhcp_agents['agents']))
        with mock.patch.object(plugin,
                               'get_dhcp_agents_hosting_networks',
                               autospec=True) as mock_hosting_agents:
            # Pretend all registered agents already host the network.
            mock_hosting_agents.return_value = plugin.get_agents_db(
                self.adminContext)
            # do_delete=False keeps the resources alive past the contexts.
            with self.network('test', do_delete=False) as net1:
                pass
            with self.subnet(network=net1,
                             cidr='10.0.1.0/24',
                             do_delete=False) as subnet1:
                pass
            with self.port(subnet=subnet1, do_delete=False) as port2:
                pass
        dhcp_agents = self._list_dhcp_agents_hosting_network(
            port2['port']['network_id'])
        self.assertEqual(0, len(dhcp_agents['agents']))
    def test_network_policy(self):
        """Non-admin access to DHCP agent-scheduler APIs returns 403."""
        with self.network() as net1:
            self._register_agent_states()
            hosta_id = self._get_agent_id(constants.AGENT_TYPE_DHCP,
                                          DHCP_HOSTA)
            self._list_networks_hosted_by_dhcp_agent(
                hosta_id, expected_code=exc.HTTPForbidden.code,
                admin_context=False)
            self._add_network_to_dhcp_agent(
                hosta_id, net1['network']['id'],
                expected_code=exc.HTTPForbidden.code,
                admin_context=False)
            # Admin add succeeds so the remove/list checks below have a
            # real binding to be denied on.
            self._add_network_to_dhcp_agent(hosta_id,
                                            net1['network']['id'])
            self._remove_network_from_dhcp_agent(
                hosta_id, net1['network']['id'],
                expected_code=exc.HTTPForbidden.code,
                admin_context=False)
            self._list_dhcp_agents_hosting_network(
                net1['network']['id'],
                expected_code=exc.HTTPForbidden.code,
                admin_context=False)
    def test_network_add_to_dhcp_agent(self):
        """Manually adding a network to an agent increments its hosted count."""
        with self.network() as net1:
            self._register_agent_states()
            hosta_id = self._get_agent_id(constants.AGENT_TYPE_DHCP,
                                          DHCP_HOSTA)
            num_before_add = len(
                self._list_networks_hosted_by_dhcp_agent(
                    hosta_id)['networks'])
            self._add_network_to_dhcp_agent(hosta_id,
                                            net1['network']['id'])
            num_after_add = len(
                self._list_networks_hosted_by_dhcp_agent(
                    hosta_id)['networks'])
        self.assertEqual(0, num_before_add)
        self.assertEqual(1, num_after_add)
    def test_network_remove_from_dhcp_agent(self):
        """Removing a network from its agent drops the binding."""
        dhcp_hosta = {
            'binary': 'neutron-dhcp-agent',
            'host': DHCP_HOSTA,
            'topic': 'DHCP_AGENT',
            'configurations': {'dhcp_driver': 'dhcp_driver',
                               'use_namespaces': True,
                               },
            'agent_type': constants.AGENT_TYPE_DHCP}
        self._register_one_agent_state(dhcp_hosta)
        hosta_id = self._get_agent_id(constants.AGENT_TYPE_DHCP,
                                      DHCP_HOSTA)
        with self.port() as port1:
            # Port creation auto-scheduled the network, hence count 1 here.
            num_before_remove = len(
                self._list_networks_hosted_by_dhcp_agent(
                    hosta_id)['networks'])
            self._remove_network_from_dhcp_agent(hosta_id,
                                                 port1['port']['network_id'])
            num_after_remove = len(
                self._list_networks_hosted_by_dhcp_agent(
                    hosta_id)['networks'])
        self.assertEqual(1, num_before_remove)
        self.assertEqual(0, num_after_remove)
def test_list_active_networks_on_not_registered_yet_dhcp_agent(self):
plugin = manager.NeutronManager.get_plugin()
nets = plugin.list_active_networks_on_active_dhcp_agent(
self.adminContext, host=DHCP_HOSTA)
self.assertEqual([], nets)
    def test_reserved_port_after_network_remove_from_dhcp_agent(self):
        """Unbinding a network marks its DHCP port's device_id as reserved."""
        dhcp_hosta = {
            'binary': 'neutron-dhcp-agent',
            'host': DHCP_HOSTA,
            'topic': 'DHCP_AGENT',
            'configurations': {'dhcp_driver': 'dhcp_driver',
                               'use_namespaces': True,
                               },
            'agent_type': constants.AGENT_TYPE_DHCP}
        self._register_one_agent_state(dhcp_hosta)
        hosta_id = self._get_agent_id(constants.AGENT_TYPE_DHCP,
                                      DHCP_HOSTA)
        with self.port(device_owner=constants.DEVICE_OWNER_DHCP,
                       host=DHCP_HOSTA) as port1:
            self._remove_network_from_dhcp_agent(hosta_id,
                                                 port1['port']['network_id'])
            port_res = self._list_ports(
                'json',
                200,
                network_id=port1['port']['network_id'])
            port_list = self.deserialize('json', port_res)
            # The DHCP port is kept but reserved for a future agent.
            self.assertEqual(port_list['ports'][0]['device_id'],
                             constants.DEVICE_ID_RESERVED_DHCP_PORT)
    def _take_down_agent_and_run_reschedule(self, host):
        """Age out *host*'s heartbeat, then run the L3 rescheduling sweep.

        Writes directly to the agents table so the scheduler's liveness
        check sees the agent as dead without waiting for a real timeout.
        """
        # take down the agent on host A and ensure B is alive
        self.adminContext.session.begin(subtransactions=True)
        query = self.adminContext.session.query(agents_db.Agent)
        agt = query.filter_by(host=host).first()
        # One hour in the past is far beyond any configured agent_down_time.
        agt.heartbeat_timestamp = (
            agt.heartbeat_timestamp - datetime.timedelta(hours=1))
        self.adminContext.session.commit()

        plugin = manager.NeutronManager.get_service_plugins().get(
            service_constants.L3_ROUTER_NAT)
        plugin.reschedule_routers_from_down_agents()
def _set_agent_admin_state_up(self, host, state):
self.adminContext.session.begin(subtransactions=True)
query = self.adminContext.session.query(agents_db.Agent)
agt_db = query.filter_by(host=host).first()
agt_db.admin_state_up = state
self.adminContext.session.commit()
    def test_router_is_not_rescheduled_from_alive_agent(self):
        """The reschedule sweep ignores routers whose agent is still alive."""
        with self.router():
            l3_rpc_cb = l3_rpc.L3RpcCallback()
            self._register_agent_states()
            # schedule the router to host A
            l3_rpc_cb.sync_routers(self.adminContext, host=L3_HOSTA)
            with mock.patch('neutron.db.l3_agentschedulers_db.'
                            'L3AgentSchedulerDbMixin.reschedule_router') as rr:
                # take down some unrelated agent and run reschedule check
                self._take_down_agent_and_run_reschedule(DHCP_HOSTC)
                self.assertFalse(rr.called)
    def test_router_reschedule_from_dead_agent(self):
        """A router hosted by a dead agent migrates to a live one."""
        with self.router():
            l3_rpc_cb = l3_rpc.L3RpcCallback()
            self._register_agent_states()
            # schedule the router to host A
            ret_a = l3_rpc_cb.sync_routers(self.adminContext, host=L3_HOSTA)
            self._take_down_agent_and_run_reschedule(L3_HOSTA)
            # B should now pick up the router
            ret_b = l3_rpc_cb.sync_routers(self.adminContext, host=L3_HOSTB)
            self.assertEqual(ret_b, ret_a)
    def test_router_no_reschedule_from_dead_admin_down_agent(self):
        """Routers on a dead but admin-down agent are left in place."""
        with self.router() as r:
            l3_rpc_cb = l3_rpc.L3RpcCallback()
            self._register_agent_states()
            # schedule the router to host A
            l3_rpc_cb.sync_routers(self.adminContext, host=L3_HOSTA)
            self._set_agent_admin_state_up(L3_HOSTA, False)
            self._take_down_agent_and_run_reschedule(L3_HOSTA)

            # A should still have it even though it was inactive due to the
            # admin_state being down
            rab = l3_agentschedulers_db.RouterL3AgentBinding
            binding = (self.adminContext.session.query(rab).
                       filter(rab.router_id == r['router']['id']).first())
            self.assertEqual(binding.l3_agent.host, L3_HOSTA)

            # B should not pick up the router
            ret_b = l3_rpc_cb.sync_routers(self.adminContext, host=L3_HOSTB)
            self.assertFalse(ret_b)
    def test_router_auto_schedule_with_invalid_router(self):
        """sync_routers returns nothing for deleted or unknown router ids."""
        with self.router() as router:
            l3_rpc_cb = l3_rpc.L3RpcCallback()
            self._register_agent_states()
        # The context manager deleted the router on exit, so its id is
        # now stale.
        # deleted router
        ret_a = l3_rpc_cb.sync_routers(self.adminContext, host=L3_HOSTA,
                                       router_ids=[router['router']['id']])
        self.assertFalse(ret_a)
        # non-existent router
        ret_a = l3_rpc_cb.sync_routers(self.adminContext, host=L3_HOSTA,
                                       router_ids=[uuidutils.generate_uuid()])
        self.assertFalse(ret_a)
    def test_router_auto_schedule_with_hosted(self):
        """First syncing L3 agent takes the router; a later one gets nothing."""
        with self.router() as router:
            l3_rpc_cb = l3_rpc.L3RpcCallback()
            self._register_agent_states()
            ret_a = l3_rpc_cb.sync_routers(self.adminContext, host=L3_HOSTA)
            ret_b = l3_rpc_cb.sync_routers(self.adminContext, host=L3_HOSTB)
            l3_agents = self._list_l3_agents_hosting_router(
                router['router']['id'])
            self.assertEqual(1, len(ret_a))
            self.assertIn(router['router']['id'], [r['id'] for r in ret_a])
            # Host B synced after A already took the router: nothing for B.
            self.assertFalse(len(ret_b))
            self.assertEqual(1, len(l3_agents['agents']))
            self.assertEqual(L3_HOSTA, l3_agents['agents'][0]['host'])
def test_router_auto_schedule_restart_l3_agent(self):
with self.router():
l3_rpc_cb = l3_rpc.L3RpcCallback()
self._register_agent_states()
l3_rpc_cb.sync_routers(self.adminContext, host=L3_HOSTA)
l3_rpc_cb.sync_routers(self.adminContext, host=L3_HOSTA)
def test_router_auto_schedule_with_hosted_2(self):
# one agent hosts one router
l3_rpc_cb = l3_rpc.L3RpcCallback()
l3_hosta = {
'binary': 'neutron-l3-agent',
'host': L3_HOSTA,
'topic': 'L3_AGENT',
'configurations': {'use_namespaces': True,
'router_id': None,
'handle_internal_only_routers':
True,
'gateway_external_network_id':
None,
'interface_driver': 'interface_driver',
},
'agent_type': constants.AGENT_TYPE_L3}
l3_hostb = copy.deepcopy(l3_hosta)
l3_hostb['host'] = L3_HOSTB
with self.router() as router1:
self._register_one_agent_state(l3_hosta)
l3_rpc_cb.sync_routers(self.adminContext, host=L3_HOSTA)
hosta_id = self._get_agent_id(constants.AGENT_TYPE_L3,
L3_HOSTA)
self._disable_agent(hosta_id, admin_state_up=False)
with self.router() as router2:
self._register_one_agent_state(l3_hostb)
l3_rpc_cb.sync_routers(self.adminContext, host=L3_HOSTB)
l3_agents_1 = self._list_l3_agents_hosting_router(
router1['router']['id'])
l3_agents_2 = self._list_l3_agents_hosting_router(
router2['router']['id'])
hosta_routers = self._list_routers_hosted_by_l3_agent(hosta_id)
num_hosta_routers = len(hosta_routers['routers'])
hostb_id = self._get_agent_id(
constants.AGENT_TYPE_L3,
L3_HOSTB)
hostb_routers = self._list_routers_hosted_by_l3_agent(hostb_id)
num_hostc_routers = len(hostb_routers['routers'])
self.assertEqual(1, num_hosta_routers)
self.assertEqual(1, num_hostc_routers)
self.assertEqual(1, len(l3_agents_1['agents']))
self.assertEqual(1, len(l3_agents_2['agents']))
self.assertEqual(L3_HOSTA, l3_agents_1['agents'][0]['host'])
self.assertEqual(L3_HOSTB, l3_agents_2['agents'][0]['host'])
    def test_router_auto_schedule_with_disabled(self):
        """A disabled agent gets no routers; the other agent gets them all."""
        with contextlib.nested(self.router(),
                               self.router()):
            l3_rpc_cb = l3_rpc.L3RpcCallback()
            self._register_agent_states()
            hosta_id = self._get_agent_id(constants.AGENT_TYPE_L3,
                                          L3_HOSTA)
            hostb_id = self._get_agent_id(constants.AGENT_TYPE_L3,
                                          L3_HOSTB)
            self._disable_agent(hosta_id)
            # first agent will not host router since it is disabled
            l3_rpc_cb.sync_routers(self.adminContext, host=L3_HOSTA)
            # second agent will host all the routers since first is disabled.
            l3_rpc_cb.sync_routers(self.adminContext, host=L3_HOSTB)
            hostb_routers = self._list_routers_hosted_by_l3_agent(hostb_id)
            num_hostb_routers = len(hostb_routers['routers'])
            hosta_routers = self._list_routers_hosted_by_l3_agent(hosta_id)
            num_hosta_routers = len(hosta_routers['routers'])
            self.assertEqual(2, num_hostb_routers)
            self.assertEqual(0, num_hosta_routers)
    def test_router_auto_schedule_with_candidates(self):
        """A legacy single-router agent only auto-schedules its own router.

        use_namespaces=False with a pinned router_id restricts which
        routers the agent is a candidate for.
        """
        l3_hosta = {
            'binary': 'neutron-l3-agent',
            'host': L3_HOSTA,
            'topic': 'L3_AGENT',
            'configurations': {'use_namespaces': False,
                               'router_id': None,
                               'handle_internal_only_routers':
                               True,
                               'gateway_external_network_id':
                               None,
                               'interface_driver': 'interface_driver',
                               },
            'agent_type': constants.AGENT_TYPE_L3}
        with contextlib.nested(self.router(),
                               self.router()) as (router1, router2):
            l3_rpc_cb = l3_rpc.L3RpcCallback()
            # Pin the agent to router1 only.
            l3_hosta['configurations']['router_id'] = router1['router']['id']
            self._register_one_agent_state(l3_hosta)
            hosta_id = self._get_agent_id(constants.AGENT_TYPE_L3,
                                          L3_HOSTA)
            l3_rpc_cb.sync_routers(self.adminContext, host=L3_HOSTA)
            hosta_routers = self._list_routers_hosted_by_l3_agent(hosta_id)
            num_hosta_routers = len(hosta_routers['routers'])
            l3_agents_1 = self._list_l3_agents_hosting_router(
                router1['router']['id'])
            l3_agents_2 = self._list_l3_agents_hosting_router(
                router2['router']['id'])
            # L3 agent will host only the compatible router.
            self.assertEqual(1, num_hosta_routers)
            self.assertEqual(1, len(l3_agents_1['agents']))
            self.assertEqual(0, len(l3_agents_2['agents']))
    def test_rpc_sync_routers(self):
        """sync_routers returns all routers, or exactly the ids requested."""
        l3_rpc_cb = l3_rpc.L3RpcCallback()
        self._register_agent_states()

        # No routers
        ret_a = l3_rpc_cb.sync_routers(self.adminContext, host=L3_HOSTA)
        self.assertEqual(0, len(ret_a))

        with contextlib.nested(self.router(),
                               self.router(),
                               self.router()) as routers:
            router_ids = [r['router']['id'] for r in routers]

            # Get all routers
            ret_a = l3_rpc_cb.sync_routers(self.adminContext, host=L3_HOSTA)
            self.assertEqual(3, len(ret_a))
            self.assertEqual(set(router_ids), set([r['id'] for r in ret_a]))

            # Get all routers (router_ids=None)
            ret_a = l3_rpc_cb.sync_routers(self.adminContext, host=L3_HOSTA,
                                           router_ids=None)
            self.assertEqual(3, len(ret_a))
            self.assertEqual(set(router_ids), set([r['id'] for r in ret_a]))

            # Get router2 only
            ret_a = l3_rpc_cb.sync_routers(self.adminContext, host=L3_HOSTA,
                                           router_ids=[router_ids[1]])
            self.assertEqual(1, len(ret_a))
            self.assertIn(router_ids[1], [r['id'] for r in ret_a])

            # Get router1 and router3
            ret_a = l3_rpc_cb.sync_routers(self.adminContext, host=L3_HOSTA,
                                           router_ids=[router_ids[0],
                                                       router_ids[2]])
            self.assertEqual(2, len(ret_a))
            self.assertIn(router_ids[0], [r['id'] for r in ret_a])
            self.assertIn(router_ids[2], [r['id'] for r in ret_a])
def test_router_auto_schedule_for_specified_routers(self):
def _sync_router_with_ids(router_ids, exp_synced, exp_hosted, host_id):
ret_a = l3_rpc_cb.sync_routers(self.adminContext, host=L3_HOSTA,
router_ids=router_ids)
self.assertEqual(exp_synced, len(ret_a))
for r in router_ids:
self.assertIn(r, [r['id'] for r in ret_a])
host_routers = self._list_routers_hosted_by_l3_agent(host_id)
num_host_routers = len(host_routers['routers'])
self.assertEqual(exp_hosted, num_host_routers)
l3_rpc_cb = l3_rpc.L3RpcCallback()
self._register_agent_states()
hosta_id = self._get_agent_id(constants.AGENT_TYPE_L3, L3_HOSTA)
with contextlib.nested(self.router(), self.router(),
self.router(), self.router()) as routers:
router_ids = [r['router']['id'] for r in routers]
# Sync router1 (router1 is scheduled)
_sync_router_with_ids([router_ids[0]], 1, 1, hosta_id)
# Sync router1 only (no router is scheduled)
_sync_router_with_ids([router_ids[0]], 1, 1, hosta_id)
# Schedule router2
_sync_router_with_ids([router_ids[1]], 1, 2, hosta_id)
# Sync router2 and router4 (router4 is scheduled)
_sync_router_with_ids([router_ids[1], router_ids[3]],
2, 3, hosta_id)
# Sync all routers (router3 is scheduled)
_sync_router_with_ids(router_ids, 4, 4, hosta_id)
    def test_router_schedule_with_candidates(self):
        """Interface-add scheduling respects a pinned single-router agent.

        The agent is restricted to router1 (use_namespaces=False plus
        router_id); adding interfaces to both routers must bind only the
        compatible one.
        """
        l3_hosta = {
            'binary': 'neutron-l3-agent',
            'host': L3_HOSTA,
            'topic': 'L3_AGENT',
            'configurations': {'use_namespaces': False,
                               'router_id': None,
                               'handle_internal_only_routers':
                               True,
                               'gateway_external_network_id':
                               None,
                               'interface_driver': 'interface_driver',
                               },
            'agent_type': constants.AGENT_TYPE_L3}
        with contextlib.nested(self.router(),
                               self.router(),
                               self.subnet(),
                               self.subnet(cidr='10.0.3.0/24')) as (router1,
                                                                    router2,
                                                                    subnet1,
                                                                    subnet2):
            # Pin the agent to router1 only.
            l3_hosta['configurations']['router_id'] = router1['router']['id']
            self._register_one_agent_state(l3_hosta)
            hosta_id = self._get_agent_id(constants.AGENT_TYPE_L3,
                                          L3_HOSTA)
            self._router_interface_action('add',
                                          router1['router']['id'],
                                          subnet1['subnet']['id'],
                                          None)
            self._router_interface_action('add',
                                          router2['router']['id'],
                                          subnet2['subnet']['id'],
                                          None)
            hosta_routers = self._list_routers_hosted_by_l3_agent(hosta_id)
            num_hosta_routers = len(hosta_routers['routers'])
            l3_agents_1 = self._list_l3_agents_hosting_router(
                router1['router']['id'])
            l3_agents_2 = self._list_l3_agents_hosting_router(
                router2['router']['id'])
            # safe cleanup
            self._router_interface_action('remove',
                                          router1['router']['id'],
                                          subnet1['subnet']['id'],
                                          None)
            self._router_interface_action('remove',
                                          router2['router']['id'],
                                          subnet2['subnet']['id'],
                                          None)
            # L3 agent will host only the compatible router.
            self.assertEqual(1, num_hosta_routers)
            self.assertEqual(1, len(l3_agents_1['agents']))
            self.assertEqual(0, len(l3_agents_2['agents']))
    def test_router_without_l3_agents(self):
        """A router created with no L3 agents registered is hosted by none."""
        with self.subnet() as s:
            self._set_net_external(s['subnet']['network_id'])
            data = {'router': {'tenant_id': uuidutils.generate_uuid()}}
            data['router']['name'] = 'router1'
            data['router']['external_gateway_info'] = {
                'network_id': s['subnet']['network_id']}
            router_req = self.new_create_request('routers', data, self.fmt)
            res = router_req.get_response(self.ext_api)
            router = self.deserialize(self.fmt, res)
            # NOTE: attribute name has an upstream typo ("Minxin"); it is
            # defined elsewhere in this file, so it cannot be renamed here.
            l3agents = (
                self.l3agentscheduler_dbMinxin.get_l3_agents_hosting_routers(
                    self.adminContext, [router['router']['id']]))
            self._delete('routers', router['router']['id'])
            self.assertEqual(0, len(l3agents))
    def test_router_sync_data(self):
        """End-to-end add/remove of a router across two L3 agents.

        Builds a router with a gateway and two interfaces, verifies it is
        hosted by one agent, manually moves it to the other agent (a second
        add while hosted must 409), and confirms the binding survives
        interface removal.
        """
        with contextlib.nested(
            self.subnet(),
            self.subnet(cidr='10.0.2.0/24'),
            self.subnet(cidr='10.0.3.0/24')
        ) as (s1, s2, s3):
            self._register_agent_states()
            self._set_net_external(s1['subnet']['network_id'])
            data = {'router': {'tenant_id': uuidutils.generate_uuid()}}
            data['router']['name'] = 'router1'
            data['router']['external_gateway_info'] = {
                'network_id': s1['subnet']['network_id']}
            router_req = self.new_create_request('routers', data, self.fmt)
            res = router_req.get_response(self.ext_api)
            router = self.deserialize(self.fmt, res)
            self._router_interface_action('add',
                                          router['router']['id'],
                                          s2['subnet']['id'],
                                          None)
            self._router_interface_action('add',
                                          router['router']['id'],
                                          s3['subnet']['id'],
                                          None)
            l3agents = self._list_l3_agents_hosting_router(
                router['router']['id'])
            self.assertEqual(1, len(l3agents['agents']))
            agents = self._list_agents()
            another_l3_agent_id = None
            another_l3_agent_host = None
            default = l3agents['agents'][0]['id']
            # Find an L3 agent other than the one currently hosting it.
            for com in agents['agents']:
                if (com['id'] != default and
                    com['agent_type'] == constants.AGENT_TYPE_L3):
                    another_l3_agent_id = com['id']
                    another_l3_agent_host = com['host']
                    break
            self.assertIsNotNone(another_l3_agent_id)
            # Adding while still hosted elsewhere must conflict.
            self._add_router_to_l3_agent(another_l3_agent_id,
                                         router['router']['id'],
                                         expected_code=exc.HTTPConflict.code)
            self._remove_router_from_l3_agent(default,
                                              router['router']['id'])
            self._add_router_to_l3_agent(another_l3_agent_id,
                                         router['router']['id'])
            l3agents = self._list_l3_agents_hosting_router(
                router['router']['id'])
            self.assertEqual(another_l3_agent_host,
                             l3agents['agents'][0]['host'])
            self._remove_router_from_l3_agent(another_l3_agent_id,
                                              router['router']['id'])
            self._router_interface_action('remove',
                                          router['router']['id'],
                                          s2['subnet']['id'],
                                          None)
            l3agents = self._list_l3_agents_hosting_router(
                router['router']['id'])
            # Interface removal re-triggers scheduling: still one agent.
            self.assertEqual(1,
                             len(l3agents['agents']))
            self._router_interface_action('remove',
                                          router['router']['id'],
                                          s3['subnet']['id'],
                                          None)
            self._delete('routers', router['router']['id'])
    def test_router_add_to_l3_agent(self):
        """Manually adding a router binds it; a second agent gets 409."""
        with self.router() as router1:
            self._register_agent_states()
            hosta_id = self._get_agent_id(constants.AGENT_TYPE_L3,
                                          L3_HOSTA)
            num_before_add = len(
                self._list_routers_hosted_by_l3_agent(
                    hosta_id)['routers'])
            self._add_router_to_l3_agent(hosta_id,
                                         router1['router']['id'])
            hostb_id = self._get_agent_id(constants.AGENT_TYPE_L3,
                                          L3_HOSTB)
            # A router may only be hosted by one agent at a time.
            self._add_router_to_l3_agent(hostb_id,
                                         router1['router']['id'],
                                         expected_code=exc.HTTPConflict.code)
            num_after_add = len(
                self._list_routers_hosted_by_l3_agent(
                    hosta_id)['routers'])
        self.assertEqual(0, num_before_add)
        self.assertEqual(1, num_after_add)
def test_router_add_to_l3_agent_two_times(self):
with self.router() as router1:
self._register_agent_states()
hosta_id = self._get_agent_id(constants.AGENT_TYPE_L3,
L3_HOSTA)
self._add_router_to_l3_agent(hosta_id,
router1['router']['id'])
# scheduling twice on the same agent is fine
self._add_router_to_l3_agent(hosta_id,
router1['router']['id'])
    def test_router_add_to_two_l3_agents(self):
        """Binding a router to a second agent is rejected with 409."""
        with self.router() as router1:
            self._register_agent_states()
            hosta_id = self._get_agent_id(constants.AGENT_TYPE_L3,
                                          L3_HOSTA)
            hostb_id = self._get_agent_id(constants.AGENT_TYPE_L3,
                                          L3_HOSTB)
            self._add_router_to_l3_agent(hosta_id,
                                         router1['router']['id'])
            self._add_router_to_l3_agent(hostb_id,
                                         router1['router']['id'],
                                         expected_code=exc.HTTPConflict.code)
    def test_router_policy(self):
        """Non-admin access to L3 agent-scheduler APIs returns 403."""
        with self.router() as router1:
            self._register_agent_states()
            hosta_id = self._get_agent_id(constants.AGENT_TYPE_L3,
                                          L3_HOSTA)
            self._list_routers_hosted_by_l3_agent(
                hosta_id, expected_code=exc.HTTPForbidden.code,
                admin_context=False)
            self._add_router_to_l3_agent(
                hosta_id, router1['router']['id'],
                expected_code=exc.HTTPForbidden.code,
                admin_context=False)
            # Admin add succeeds so the remove/list checks below have a
            # real binding to be denied on.
            self._add_router_to_l3_agent(
                hosta_id, router1['router']['id'])
            self._remove_router_from_l3_agent(
                hosta_id, router1['router']['id'],
                expected_code=exc.HTTPForbidden.code,
                admin_context=False)
            self._list_l3_agents_hosting_router(
                router1['router']['id'],
                expected_code=exc.HTTPForbidden.code,
                admin_context=False)
class OvsDhcpAgentNotifierTestCase(test_l3_plugin.L3NatTestCaseMixin,
                                   test_agent_ext_plugin.AgentDBTestMixIn,
                                   AgentSchedulerTestMixIn,
                                   test_plugin.NeutronDbPluginV2TestCase):
    """Tests for the RPC notifications cast to DHCP agents by the OVS plugin."""

    plugin_str = ('neutron.plugins.openvswitch.'
                  'ovs_neutron_plugin.OVSNeutronPluginV2')

    def setUp(self):
        """Load the OVS plugin with the DHCP notifier's cast() mocked out."""
        # Save the global RESOURCE_ATTRIBUTE_MAP before loading plugin
        self.saved_attr_map = {}
        for resource, attrs in attributes.RESOURCE_ATTRIBUTE_MAP.iteritems():
            self.saved_attr_map[resource] = attrs.copy()
        super(OvsDhcpAgentNotifierTestCase, self).setUp(self.plugin_str)
        # the notifier is used to get access to make_msg() method only
        self.dhcp_notifier = dhcp_rpc_agent_api.DhcpAgentNotifyAPI()
        # Intercept outgoing RPC casts so tests can assert on them.
        self.dhcp_notifier_cast = mock.patch(
            'neutron.api.rpc.agentnotifiers.dhcp_rpc_agent_api.'
            'DhcpAgentNotifyAPI.cast').start()
        ext_mgr = extensions.PluginAwareExtensionManager.get_instance()
        self.ext_api = test_extensions.setup_extensions_middleware(ext_mgr)
        self.adminContext = context.get_admin_context()
        # Add the resources to the global attribute map
        # This is done here as the setup process won't
        # initialize the main API router which extends
        # the global attribute map
        attributes.RESOURCE_ATTRIBUTE_MAP.update(
            agent.RESOURCE_ATTRIBUTE_MAP)
        self.addCleanup(self.restore_attribute_map)
        fake_notifier.reset()

    def restore_attribute_map(self):
        """Undo the RESOURCE_ATTRIBUTE_MAP mutation made in setUp()."""
        # Restore the original RESOURCE_ATTRIBUTE_MAP
        attributes.RESOURCE_ATTRIBUTE_MAP = self.saved_attr_map

    def test_network_add_to_dhcp_agent_notification(self):
        """Adding a network to an agent casts network_create_end to it."""
        with self.network() as net1:
            network_id = net1['network']['id']
            self._register_agent_states()
            hosta_id = self._get_agent_id(constants.AGENT_TYPE_DHCP,
                                          DHCP_HOSTA)
            self._add_network_to_dhcp_agent(hosta_id,
                                            network_id)
            self.dhcp_notifier_cast.assert_called_with(
                mock.ANY,
                self.dhcp_notifier.make_msg(
                    'network_create_end',
                    payload={'network': {'id': network_id}}),
                topic='dhcp_agent.' + DHCP_HOSTA)
            notifications = fake_notifier.NOTIFICATIONS
            expected_event_type = 'dhcp_agent.network.add'
            self._assert_notify(notifications, expected_event_type)

    def test_network_remove_from_dhcp_agent_notification(self):
        """Removing a network from an agent casts network_delete_end to it."""
        with self.network(do_delete=False) as net1:
            network_id = net1['network']['id']
            self._register_agent_states()
            hosta_id = self._get_agent_id(constants.AGENT_TYPE_DHCP,
                                          DHCP_HOSTA)
            self._add_network_to_dhcp_agent(hosta_id,
                                            network_id)
            self._remove_network_from_dhcp_agent(hosta_id,
                                                 network_id)
            self.dhcp_notifier_cast.assert_called_with(
                mock.ANY,
                self.dhcp_notifier.make_msg(
                    'network_delete_end',
                    payload={'network_id': network_id}),
                topic='dhcp_agent.' + DHCP_HOSTA)
            notifications = fake_notifier.NOTIFICATIONS
            expected_event_type = 'dhcp_agent.network.remove'
            self._assert_notify(notifications, expected_event_type)

    def test_agent_updated_dhcp_agent_notification(self):
        """Disabling an agent casts agent_updated to that agent's topic."""
        self._register_agent_states()
        hosta_id = self._get_agent_id(constants.AGENT_TYPE_DHCP,
                                      DHCP_HOSTA)
        self._disable_agent(hosta_id, admin_state_up=False)
        self.dhcp_notifier_cast.assert_called_with(
            mock.ANY, self.dhcp_notifier.make_msg(
                'agent_updated',
                payload={'admin_state_up': False}),
            topic='dhcp_agent.' + DHCP_HOSTA)

    def _network_port_create(
            self, hosts, gateway=attributes.ATTR_NOT_SPECIFIED, owner=None):
        """Register DHCP agents on *hosts* and create a net/subnet/port.

        Returns [network, subnet, port]; resources are created with
        do_delete=False so they outlive the context managers.
        """
        for host in hosts:
            self._register_one_agent_state(
                {'binary': 'neutron-dhcp-agent',
                 'host': host,
                 'topic': 'dhcp_agent',
                 'configurations': {'dhcp_driver': 'dhcp_driver',
                                    'use_namespaces': True, },
                 'agent_type': constants.AGENT_TYPE_DHCP})
        with self.network(do_delete=False) as net1:
            with self.subnet(network=net1,
                             gateway_ip=gateway,
                             do_delete=False) as subnet1:
                if owner:
                    with self.port(subnet=subnet1,
                                   do_delete=False,
                                   device_owner=owner) as port:
                        return [net1, subnet1, port]
                else:
                    with self.port(subnet=subnet1,
                                   do_delete=False) as port:
                        return [net1, subnet1, port]

    def _notification_mocks(self, hosts, net, subnet, port):
        """Build the expected per-host cast() calls for net/port creation."""
        host_calls = {}
        for host in hosts:
            expected_calls = [
                mock.call(
                    mock.ANY,
                    self.dhcp_notifier.make_msg(
                        'network_create_end',
                        payload={'network': {'id': net['network']['id']}}),
                    topic='dhcp_agent.' + host),
                mock.call(
                    mock.ANY,
                    self.dhcp_notifier.make_msg(
                        'port_create_end',
                        payload={'port': port['port']}),
                    topic='dhcp_agent.' + host)]
            host_calls[host] = expected_calls
        return host_calls

    def test_network_port_create_notification(self):
        """Single-agent setup: casts match the expected sequence exactly."""
        hosts = [DHCP_HOSTA]
        net, subnet, port = self._network_port_create(hosts)
        expected_calls = self._notification_mocks(hosts, net, subnet, port)
        self.assertEqual(
            expected_calls[DHCP_HOSTA], self.dhcp_notifier_cast.call_args_list)

    def test_network_ha_port_create_notification(self):
        """HA setup: both hosting agents receive the creation casts."""
        cfg.CONF.set_override('dhcp_agents_per_network', 2)
        hosts = [DHCP_HOSTA, DHCP_HOSTC]
        net, subnet, port = self._network_port_create(hosts)
        expected_calls = self._notification_mocks(hosts, net, subnet, port)
        # Order across agents is not pinned; check membership per host.
        for expected in expected_calls[DHCP_HOSTA]:
            self.assertIn(expected, self.dhcp_notifier_cast.call_args_list)
        for expected in expected_calls[DHCP_HOSTC]:
            self.assertIn(expected, self.dhcp_notifier_cast.call_args_list)

    def _is_schedule_network_called(self, device_id):
        """Create a port with *device_id*; report whether scheduling ran."""
        plugin = manager.NeutronManager.get_plugin()
        notifier = plugin.agent_notifiers[constants.AGENT_TYPE_DHCP]
        with contextlib.nested(
            self.subnet(),
            mock.patch.object(plugin,
                              'get_dhcp_agents_hosting_networks',
                              return_value=[]),
            mock.patch.object(notifier,
                              '_schedule_network',
                              return_value=[])
        ) as (subnet, _, mock_sched):
            with self.port(subnet=subnet, device_id=device_id):
                return mock_sched.called

    def test_reserved_dhcp_port_creation(self):
        """A reserved DHCP port must not trigger network scheduling."""
        device_id = constants.DEVICE_ID_RESERVED_DHCP_PORT
        self.assertFalse(self._is_schedule_network_called(device_id))

    def test_unreserved_dhcp_port_creation(self):
        """An ordinary port does trigger network scheduling."""
        device_id = 'not_reserved'
        self.assertTrue(self._is_schedule_network_called(device_id))
class OvsL3AgentNotifierTestCase(test_l3_plugin.L3NatTestCaseMixin,
                                 test_agent_ext_plugin.AgentDBTestMixIn,
                                 AgentSchedulerTestMixIn,
                                 test_plugin.NeutronDbPluginV2TestCase):
    """Verify that L3 agent RPC casts and API notifications are emitted
    when routers are added to / removed from agents and when an agent's
    admin state changes."""

    # Core plugin under test (OVS); subclasses may set l3_plugin to load
    # a separate L3 service plugin instead.
    plugin_str = ('neutron.plugins.openvswitch.'
                  'ovs_neutron_plugin.OVSNeutronPluginV2')
    l3_plugin = None

    def setUp(self):
        # Patch the DHCP notifier class *before* the plugin is loaded so
        # no real DHCP agent notifications are emitted during these tests.
        self.dhcp_notifier_cls_p = mock.patch(
            'neutron.api.rpc.agentnotifiers.dhcp_rpc_agent_api.'
            'DhcpAgentNotifyAPI')
        self.dhcp_notifier = mock.Mock(name='dhcp_notifier')
        self.dhcp_notifier_cls = self.dhcp_notifier_cls_p.start()
        self.dhcp_notifier_cls.return_value = self.dhcp_notifier
        # Save the global RESOURCE_ATTRIBUTE_MAP (copied per resource so
        # the in-place update below can be undone in restore_attribute_map).
        self.saved_attr_map = {}
        for resource, attrs in attributes.RESOURCE_ATTRIBUTE_MAP.iteritems():
            self.saved_attr_map[resource] = attrs.copy()
        if self.l3_plugin:
            service_plugins = {'l3_plugin_name': self.l3_plugin}
        else:
            service_plugins = None
        super(OvsL3AgentNotifierTestCase, self).setUp(
            self.plugin_str, service_plugins=service_plugins)
        ext_mgr = extensions.PluginAwareExtensionManager.get_instance()
        self.ext_api = test_extensions.setup_extensions_middleware(ext_mgr)
        self.adminContext = context.get_admin_context()
        # Add the resources to the global attribute map
        # This is done here as the setup process won't
        # initialize the main API router which extends
        # the global attribute map
        attributes.RESOURCE_ATTRIBUTE_MAP.update(
            agent.RESOURCE_ATTRIBUTE_MAP)
        self.addCleanup(self.restore_attribute_map)
        fake_notifier.reset()

    def restore_attribute_map(self):
        # Restore the original RESOURCE_ATTRIBUTE_MAP
        attributes.RESOURCE_ATTRIBUTE_MAP = self.saved_attr_map

    def test_router_add_to_l3_agent_notification(self):
        # Adding a router to an agent should cast 'router_added_to_agent'
        # on that agent's topic and emit an 'l3_agent.router.add' event.
        plugin = manager.NeutronManager.get_plugin()
        l3_notifier = plugin.agent_notifiers[constants.AGENT_TYPE_L3]
        with mock.patch.object(l3_notifier, 'cast') as mock_l3:
            with self.router() as router1:
                self._register_agent_states()
                hosta_id = self._get_agent_id(constants.AGENT_TYPE_L3,
                                              L3_HOSTA)
                self._add_router_to_l3_agent(hosta_id,
                                             router1['router']['id'])
                routers = [router1['router']['id']]
            mock_l3.assert_called_with(
                mock.ANY,
                l3_notifier.make_msg(
                    'router_added_to_agent',
                    payload=routers),
                topic='l3_agent.hosta')
            notifications = fake_notifier.NOTIFICATIONS
            expected_event_type = 'l3_agent.router.add'
            self._assert_notify(notifications, expected_event_type)

    def test_router_remove_from_l3_agent_notification(self):
        # Removing a router should cast 'router_removed_from_agent' and
        # emit an 'l3_agent.router.remove' event.
        plugin = manager.NeutronManager.get_plugin()
        l3_notifier = plugin.agent_notifiers[constants.AGENT_TYPE_L3]
        with mock.patch.object(l3_notifier, 'cast') as mock_l3:
            with self.router() as router1:
                self._register_agent_states()
                hosta_id = self._get_agent_id(constants.AGENT_TYPE_L3,
                                              L3_HOSTA)
                self._add_router_to_l3_agent(hosta_id,
                                             router1['router']['id'])
                self._remove_router_from_l3_agent(hosta_id,
                                                  router1['router']['id'])
            mock_l3.assert_called_with(
                mock.ANY, l3_notifier.make_msg(
                    'router_removed_from_agent',
                    payload={'router_id': router1['router']['id']}),
                topic='l3_agent.hosta')
            notifications = fake_notifier.NOTIFICATIONS
            expected_event_type = 'l3_agent.router.remove'
            self._assert_notify(notifications, expected_event_type)

    def test_agent_updated_l3_agent_notification(self):
        # Disabling an agent should cast 'agent_updated' with the new
        # admin_state_up value on that agent's topic.
        plugin = manager.NeutronManager.get_plugin()
        l3_notifier = plugin.agent_notifiers[constants.AGENT_TYPE_L3]
        with mock.patch.object(l3_notifier, 'cast') as mock_l3:
            self._register_agent_states()
            hosta_id = self._get_agent_id(constants.AGENT_TYPE_L3,
                                          L3_HOSTA)
            self._disable_agent(hosta_id, admin_state_up=False)
            mock_l3.assert_called_with(
                mock.ANY, l3_notifier.make_msg(
                    'agent_updated', payload={'admin_state_up': False}),
                topic='l3_agent.hosta')
class OvsAgentSchedulerTestCaseXML(OvsAgentSchedulerTestCase):
    # Re-run the entire OVS agent scheduler suite using XML
    # request/response serialization instead of the default.
    fmt = 'xml'
|
unknown
|
codeparrot/codeparrot-clean
| ||
#!/usr/bin/env python
#
# GrovePi Example for using the Grove - I2C ADC(http://www.seeedstudio.com/depot/Grove-I2C-ADC-p-1580.html)
#
# The GrovePi connects the Raspberry Pi and Grove sensors. You can learn more about GrovePi here: http://www.dexterindustries.com/GrovePi
#
# Have a question about this example? Ask on the forums here: http://www.dexterindustries.com/forum/?forum=grovepi
#
'''
## License
The MIT License (MIT)
GrovePi for the Raspberry Pi: an open source platform for connecting Grove Sensors to the Raspberry Pi.
Copyright (C) 2015 Dexter Industries
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
'''
import grove_i2c_adc
import time

# You can initialize with a different address too: grove_i2c_adc.ADC(address=0x56)
adc = grove_i2c_adc.ADC()

while True:
    # Print the 12 bit value from the I2C ADC.
    # Single-argument print(...) is valid under both Python 2 and 3,
    # unlike the bare Python-2-only print statement used previously.
    print(adc.adc_read())
    time.sleep(.5)
|
unknown
|
codeparrot/codeparrot-clean
| ||
from __future__ import print_function
import sys, os, pdb
sys.path.insert(0, 'src')
import numpy as np, scipy.misc
from optimize import optimize
from argparse import ArgumentParser
from utils import save_img, get_img, exists, list_files
import evaluate
# ---- Default hyper-parameters and paths for style-transfer training ----
CONTENT_WEIGHT = 7.5e0   # weight of the content loss term
STYLE_WEIGHT = 1e2       # weight of the style loss term
TV_WEIGHT = 2e2          # weight of the total-variation smoothness term
LEARNING_RATE = 1e-3     # optimizer step size for feed-forward training
NUM_EPOCHS = 2           # passes over the training set
CHECKPOINT_DIR = 'checkpoints'    # where model checkpoints are written
CHECKPOINT_ITERATIONS = 2000      # iterations between checkpoints
VGG_PATH = 'data/imagenet-vgg-verydeep-19.mat'  # pre-trained VGG19 weights
TRAIN_PATH = 'data/train2014'     # training image folder (presumably MS-COCO train2014)
BATCH_SIZE = 4           # images per training step
DEVICE = '/gpu:0'        # device string -- NOTE(review): not referenced in this chunk, verify use
FRAC_GPU = 1             # NOTE(review): not referenced in this chunk, verify use
def build_parser():
    """Build the command-line argument parser for training options."""
    arg_parser = ArgumentParser()
    add = arg_parser.add_argument

    # Required arguments.
    add('--checkpoint-dir', type=str, dest='checkpoint_dir',
        help='dir to save checkpoint in', metavar='CHECKPOINT_DIR',
        required=True)
    add('--style', type=str, dest='style', help='style image path',
        metavar='STYLE', required=True)

    # Optional paths and debug flags.
    add('--train-path', type=str, dest='train_path',
        help='path to training images folder', metavar='TRAIN_PATH',
        default=TRAIN_PATH)
    add('--test', type=str, dest='test', help='test image path',
        metavar='TEST', default=False)
    add('--test-dir', type=str, dest='test_dir',
        help='test image save dir', metavar='TEST_DIR', default=False)
    add('--slow', dest='slow', action='store_true',
        help='gatys\' approach (for debugging, not supported)',
        default=False)

    # Training schedule.
    add('--epochs', type=int, dest='epochs', help='num epochs',
        metavar='EPOCHS', default=NUM_EPOCHS)
    add('--batch-size', type=int, dest='batch_size', help='batch size',
        metavar='BATCH_SIZE', default=BATCH_SIZE)
    add('--checkpoint-iterations', type=int, dest='checkpoint_iterations',
        help='checkpoint frequency', metavar='CHECKPOINT_ITERATIONS',
        default=CHECKPOINT_ITERATIONS)
    add('--vgg-path', type=str, dest='vgg_path',
        help='path to VGG19 network (default %(default)s)',
        metavar='VGG_PATH', default=VGG_PATH)

    # Loss weights and optimizer settings.
    add('--content-weight', type=float, dest='content_weight',
        help='content weight (default %(default)s)',
        metavar='CONTENT_WEIGHT', default=CONTENT_WEIGHT)
    add('--style-weight', type=float, dest='style_weight',
        help='style weight (default %(default)s)',
        metavar='STYLE_WEIGHT', default=STYLE_WEIGHT)
    add('--tv-weight', type=float, dest='tv_weight',
        help='total variation regularization weight (default %(default)s)',
        metavar='TV_WEIGHT', default=TV_WEIGHT)
    add('--learning-rate', type=float, dest='learning_rate',
        help='learning rate (default %(default)s)',
        metavar='LEARNING_RATE', default=LEARNING_RATE)
    return arg_parser
def check_opts(opts):
    """Validate parsed CLI options, failing fast on bad paths or values.

    Missing paths are reported via the project `exists` helper; numeric
    options out of range trip an assertion.
    """
    exists(opts.checkpoint_dir, "checkpoint dir not found!")
    exists(opts.style, "style path not found!")
    exists(opts.train_path, "train path not found!")
    # Bug fix: previously both --test and --test-dir were checked whenever
    # *either* was supplied, so giving only one crashed on exists(False, ...).
    # Validate each path only when it was actually provided.
    if opts.test:
        exists(opts.test, "test img not found!")
    if opts.test_dir:
        exists(opts.test_dir, "test directory not found!")
    exists(opts.vgg_path, "vgg network data not found!")
    assert opts.epochs > 0
    assert opts.batch_size > 0
    assert opts.checkpoint_iterations > 0
    assert os.path.exists(opts.vgg_path)
    assert opts.content_weight >= 0
    assert opts.style_weight >= 0
    assert opts.tv_weight >= 0
    assert opts.learning_rate >= 0
def _get_files(img_dir):
    """Return the full path of every file listed in ``img_dir``."""
    return [os.path.join(img_dir, fname) for fname in list_files(img_dir)]
def main():
    """Parse CLI options and run style-transfer training.

    Periodically evaluates on the optional test image and prints the
    evaluation command when training finishes.
    """
    parser = build_parser()
    options = parser.parse_args()
    check_opts(options)

    style_target = get_img(options.style)
    if not options.slow:
        content_targets = _get_files(options.train_path)
    elif options.test:
        content_targets = [options.test]

    kwargs = {
        "slow": options.slow,
        "epochs": options.epochs,
        "print_iterations": options.checkpoint_iterations,
        "batch_size": options.batch_size,
        "save_path": os.path.join(options.checkpoint_dir, 'fns.ckpt'),
        "learning_rate": options.learning_rate,
    }

    if options.slow:
        # Direct (Gatys-style) optimization needs far more iterations and
        # a larger step size than feed-forward training.
        if options.epochs < 10:
            kwargs['epochs'] = 1000
        if options.learning_rate < 1:
            kwargs['learning_rate'] = 1e1

    args = [
        content_targets,
        style_target,
        options.content_weight,
        options.style_weight,
        options.tv_weight,
        options.vgg_path,
    ]

    for preds, losses, i, epoch in optimize(*args, **kwargs):
        style_loss, content_loss, tv_loss, loss = losses

        print('Epoch %d, Iteration: %d, Loss: %s' % (epoch, i, loss))
        to_print = (style_loss, content_loss, tv_loss)
        print('style: %s, content:%s, tv: %s' % to_print)
        if options.test:
            assert options.test_dir != False
            preds_path = '%s/%s_%s.png' % (options.test_dir, epoch, i)
            if not options.slow:
                ckpt_dir = os.path.dirname(options.checkpoint_dir)
                evaluate.ffwd_to_img(options.test, preds_path,
                                     options.checkpoint_dir)
            else:
                # Bug fix: the original called save_img(preds_path, img)
                # but no name `img` exists in this scope, so the slow path
                # always crashed with NameError. `preds` is the stylized
                # output yielded by optimize() -- TODO confirm its format
                # matches what save_img expects.
                save_img(preds_path, preds)
    ckpt_dir = options.checkpoint_dir
    cmd_text = 'python evaluate.py --checkpoint-dir %s ...' % ckpt_dir
    print("Training complete. For evaluation:\n    `%s`" % cmd_text)
|
unknown
|
codeparrot/codeparrot-clean
| ||
from django.db import migrations, models
class Migration(migrations.Migration):
    """Initial migration: create the Author model."""

    initial = True

    operations = [
        migrations.CreateModel(
            name="Author",
            fields=[
                ("id", models.AutoField(primary_key=True)),
                ("name", models.CharField(max_length=255)),
            ],
        ),
    ]
|
python
|
github
|
https://github.com/django/django
|
tests/migrations/test_migrations_namespace_package/0001_initial.py
|
# coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import os
import unittest
from pants.base.build_root import BuildRoot
from pants.util.contextutil import environment_as, pushd, temporary_dir
from pants.util.dirutil import safe_mkdir, safe_mkdtemp, safe_rmtree, touch
class BuildRootTest(unittest.TestCase):
    """Unit tests for the BuildRoot singleton path resolver."""

    def setUp(self):
        # Remember the currently detected root, create a fresh temp dir to
        # use as an alternative root, and clear any cached state.
        self.original_root = BuildRoot().path
        self.new_root = os.path.realpath(safe_mkdtemp())
        BuildRoot().reset()

    def tearDown(self):
        # Drop cached state and remove the temp dir created in setUp.
        BuildRoot().reset()
        safe_rmtree(self.new_root)

    def test_via_set(self):
        # Explicitly assigning .path overrides auto-detection.
        BuildRoot().path = self.new_root
        self.assertEqual(self.new_root, BuildRoot().path)

    def test_reset(self):
        # reset() discards an explicit override; the root is re-detected.
        BuildRoot().path = self.new_root
        BuildRoot().reset()
        self.assertEqual(self.original_root, BuildRoot().path)

    def test_via_pants_runner(self):
        # Detection finds the directory containing the 'pants' file, both
        # from that directory and from any nested subdirectory.
        with temporary_dir() as root:
            root = os.path.realpath(root)
            touch(os.path.join(root, 'pants'))
            with pushd(root):
                self.assertEqual(root, BuildRoot().path)
            BuildRoot().reset()
            child = os.path.join(root, 'one', 'two')
            safe_mkdir(child)
            with pushd(child):
                self.assertEqual(root, BuildRoot().path)

    def test_temporary(self):
        # temporary() swaps the root only for the duration of the context.
        with BuildRoot().temporary(self.new_root):
            self.assertEqual(self.new_root, BuildRoot().path)
        self.assertEqual(self.original_root, BuildRoot().path)

    def test_singleton(self):
        # Repeated lookups are stable, before and after an override.
        self.assertEqual(BuildRoot().path, BuildRoot().path)
        BuildRoot().path = self.new_root
        self.assertEqual(BuildRoot().path, BuildRoot().path)

    def test_not_found(self):
        # With no 'pants' marker anywhere above the cwd, lookup raises.
        with temporary_dir() as root:
            root = os.path.realpath(root)
            with pushd(root):
                self.assertRaises(BuildRoot.NotFoundError, lambda: BuildRoot().path)

    def test_buildroot_override(self):
        # The PANTS_BUILDROOT_OVERRIDE env var takes precedence.
        with temporary_dir() as root:
            with environment_as(PANTS_BUILDROOT_OVERRIDE=root):
                self.assertEqual(BuildRoot().path, root)
|
unknown
|
codeparrot/codeparrot-clean
| ||
{
"backfill": {
"affected_few": "{{count}} مهام ستُشغّل.",
"affected_many": "{{count}} مهمة ستُشغّل.",
"affected_one": "مهمة واحدة ستُشغّل.",
"affected_other": "{{count}} مهمة ستُشغل.",
"affected_two": "مهمتان ستُشغّلان.",
"affected_zero": "لا توجد أي مهمة ستُشغَّل.",
"affectedNone": "لا توجد مهام تطابق المعايير المحددة.",
"allRuns": "جميع التشغيلات",
"backwards": "تشغيل رجعي",
"dateRange": "نطاق التاريخ",
"errorStartDateBeforeEndDate": "يجب أن يكون تاريخ البدء قبل تاريخ الانتهاء",
"maxRuns": "الحد الأقصى للتشغيلات النشطة",
"missingAndErroredRuns": "تشغيلات مفقودة وخاطئة",
"missingRuns": "تشغيلات مفقودة",
"permissionDenied": "فشل التشغيل التجريبي (Dry Run): ليس لدى المستخدم صلاحية لإنشاء تعبئات رجعية.",
"reprocessBehavior": "اعادة معالجة السلوك",
"run": "تشغيل التعبئة الرجعية",
"selectDescription": "تشغيل هذا Dag لنطاق من التواريخ",
"selectLabel": "تعبئة رجعية",
"title": "تشغيل التعبئة الرجعية",
"toaster": {
"success": {
"description": "تم تشغيل مهام التعبئة الرجعية بنجاح.",
"title": "تم إنشاء التعبئة الرجعية"
}
},
"tooltip": "التعبئة الرجعية تتطلب جدول زمني",
"unpause": "إلغاء إيقاف {{dag_display_name}} عند التشغيل",
"validation": {
"datesRequired": "يجب توفير بيانات كل من تاريخ بدء فترة وتاريخ الانتهاء.",
"startBeforeEnd": "بيانات فترة تاريخ البدء يجب ان تكون من او قبل تاريخ الانتهاء. "
}
},
"banner": {
"backfillInProgress": "التعبئة الرجعية قيد التنفيذ",
"cancel": "إلغاء التعبئة الرجعية",
"pause": "إيقاف التعبئة الرجعية",
"unpause": "إلغاء إيقاف التعبئة الرجعية"
},
"clipboard": {
"copy": "نسخ"
},
"close": "إغلاق",
"configForm": {
"advancedOptions": "خيارات متقدمة",
"configJson": "تكوين JSON",
"invalidJson": "تنسيق JSON غير صالح: {{errorMessage}}"
},
"dagWarnings": {
"error_few": "أخطاء",
"error_many": "أخطاء",
"error_one": "خطأ",
"error_other": "أخطاء",
"error_two": "خطآن",
"error_zero": "لا يوجد أي خطأ",
"errorAndWarning": "1 خطأ و{{warningText}}",
"warning_few": "{{count}} تحذيرات",
"warning_many": "{{count}} تحذير",
"warning_one": "1 تحذير",
"warning_other": "{{count}} تحذير",
"warning_two": "تحذيران",
"warning_zero": "لا يوجد أي تحذير"
},
"dateRangeFilter": {
"validation": {
"invalidDateFormat": "تنسيق التاريخ غير صالح.",
"invalidTimeFormat": "تنسيق الوقت غير صالح.",
"startBeforeEnd": "يجب أن يكون تاريخ/وقت البدء قبل تاريخ/وقت الانتهاء"
}
},
"durationChart": {
"duration": "المدة (بالثواني)",
"lastDagRun_few": "آخر {{count}} تشغيلات Dag",
"lastDagRun_many": "آخر {{count}} تشغيل Dag",
"lastDagRun_one": "آخر تشغيل Dag",
"lastDagRun_other": "آخر {{count}} تشغيل Dag",
"lastDagRun_two": "آخر تشغيلين Dag",
"lastDagRun_zero": "لا يوجد أي تشغيل Dag",
"lastTaskInstance_few": "آخر {{count}} مثيلات مهمة",
"lastTaskInstance_many": "آخر {{count}} مثيل مهمة",
"lastTaskInstance_one": "آخر مثيل مهمة",
"lastTaskInstance_other": "آخر {{count}} مثيل مهمة",
"lastTaskInstance_two": "آخر مثيلين مهمة",
"lastTaskInstance_zero": "لا يوجد أي مثيل مهمة",
"queuedDuration": "مدة الانتظار في الطابور",
"runAfter": "تشغيل بعد",
"runDuration": "مدة التشغيل"
},
"fileUpload": {
"files_few": "{{count}} ملفات",
"files_many": "{{count}} ملف",
"files_one": "ملف",
"files_other": "{{count}} ملف",
"files_two": "ملفان",
"files_zero": "لا يوجد أي ملف"
},
"flexibleForm": {
"placeholder": "اختر قيمة",
"placeholderArray": "أدخل كل سلسلة في سطر جديد",
"placeholderExamples": "ابدأ الكتابة لرؤية الخيارات",
"placeholderMulti": "اختر قيمة أو أكثر",
"validationErrorArrayNotArray": "القيمة يجب أن تكون مصفوفة.",
"validationErrorArrayNotNumbers": "جميع العناصر في المصفوفة يجب أن تكون أرقامًا.",
"validationErrorArrayNotObject": "جميع العناصر في المصفوفة يجب أن تكون كائنات.",
"validationErrorRequired": "هذا الحقل مطلوب"
},
"graph": {
"directionDown": "من الأعلى إلى الأسفل",
"directionLeft": "من اليمين إلى اليسار",
"directionRight": "من اليسار إلى اليمين",
"directionUp": "من الأسفل إلى الأعلى",
"downloadImage": "تحميل صورة الرسم البياني",
"downloadImageError": "فشل تحميل صورة الرسم البياني.",
"downloadImageErrorTitle": "فشل التحميل",
"otherDagRuns": "+تشغيلات Dag أخرى",
"taskCount_few": "{{count}} مهام",
"taskCount_many": "{{count}} مهمة",
"taskCount_one": "{{count}} مهمة",
"taskCount_other": "{{count}} مهمة",
"taskCount_two": "مهمتان",
"taskCount_zero": "لا يوجد أي مهمة",
"taskGroup": "مجموعة المهام"
},
"limitedList": "+{{count}} المزيد",
"limitedList.allItems": "جميع العناصر {{count}}:",
"limitedList.allTags_few": "كل العلامات ({{count}})",
"limitedList.allTags_many": "كل العلامات ({{count}})",
"limitedList.allTags_one": "كل العلامات (1)",
"limitedList.allTags_other": "كل العلامات ({{count}})",
"limitedList.allTags_two": "كل العلامات (2)",
"limitedList.allTags_zero": "كل العلامات (0)",
"limitedList.clickToInteract": "انقر على علامة لتصفية Dags",
"limitedList.clickToOpenFull": "انقر \"+{{count}} المزيد\" لعرض القائمة الكاملة",
"limitedList.copyPasteText": "يمكنك نسخ ولصق النص أعلاه",
"limitedList.showingItems_few": "عرض {{count}} عناصر",
"limitedList.showingItems_many": "عرض {{count}} عنصر",
"limitedList.showingItems_one": "عرض عنصر واحد",
"limitedList.showingItems_other": "عرض {{count}} عنصر",
"limitedList.showingItems_two": "عرض عنصرين",
"limitedList.showingItems_zero": "عرض 0 عناصر",
"logs": {
"file": "ملف",
"location": "سطر {{line}} في {{name}}"
},
"reparseDag": "إعادة تحليل Dag",
"sortedAscending": "الترتيب تصاعدي",
"sortedDescending": "الترتيب تنازلي",
"sortedUnsorted": "غير مرتب",
"taskTries": "محاولات المهمة",
"taskTryPlaceholder": "محاولة المهمة",
"team": {
"selector": {
"helperText": "اختياري. قصر الاستخدام على فريق محدد.",
"label": "فريق",
"placeHolder": "اختر فريقًا"
}
},
"toggleCardView": "عرض البطاقة",
"toggleTableView": "عرض الجدول",
"triggerDag": {
"button": "تشغيل",
"dataInterval": "فترة البيانات",
"dataIntervalAuto": "مستنتجة من التاريخ المنطقي والجدول الزمني",
"dataIntervalManual": "تحديد يدويًا",
"intervalEnd": "النهاية",
"intervalStart": "البداية",
"loading": "جارٍ تحميل معلومات Dag...",
"loadingFailed": "فشل تحميل معلومات Dag. يرجى المحاولة مرة أخرى.",
"runIdHelp": "اختياري - سيتم توليده تلقائيًا إذا لم يتم توفيره.",
"selectDescription": "تشغيل عملية واحدة من هذا Dag",
"selectLabel": "تشغيلة واحدة",
"title": "تشغيل Dag",
"toaster": {
"error": {
"title": "فشل تشغيل Dag"
},
"success": {
"description": "تم تشغيل عملية Dag بنجاح.",
"title": "تم تشغيل Dag"
}
},
"triggerAgainWithConfig": "تشغيل مرة أخرى باستخدام هذا التكوين",
"unpause": "إلغاء إيقاف {{dagDisplayName}} عند التشغيل"
},
"trimText": {
"details": "التفاصيل",
"empty": "فارغ",
"noContent": "لا توجد محتويات متاحة."
},
"versionDetails": {
"bundleLink": "رابط الحزمة",
"bundleName": "اسم الحزمة",
"bundleVersion": "إصدار الحزمة",
"createdAt": "تاريخ الإنشاء",
"versionId": "معرف الإصدار"
},
"versionSelect": {
"allVersions": "كل الإصدارات",
"dagVersion": "إصدار Dag",
"versionCode": "v{{versionCode}}"
}
}
|
json
|
github
|
https://github.com/apache/airflow
|
airflow-core/src/airflow/ui/public/i18n/locales/ar/components.json
|
"""Setup some common test helper things."""
import functools
import logging
import pytest
import requests_mock as _requests_mock
from homeassistant import util
from homeassistant.util import location
from .common import async_test_home_assistant
from .test_util.aiohttp import mock_aiohttp_client
logging.basicConfig()
logging.getLogger('sqlalchemy.engine').setLevel(logging.INFO)
def test_real(func):
"""Force a function to require a keyword _test_real to be passed in."""
@functools.wraps(func)
def guard_func(*args, **kwargs):
real = kwargs.pop('_test_real', None)
if not real:
raise Exception('Forgot to mock or pass "_test_real=True" to %s',
func.__name__)
return func(*args, **kwargs)
return guard_func
# Guard a few functions that would make network connections: each now
# raises unless explicitly called with _test_real=True.
location.detect_location_info = test_real(location.detect_location_info)
location.elevation = test_real(location.elevation)
# Pin the local IP so tests never inspect the host's real interfaces.
util.get_local_ip = lambda: '127.0.0.1'
@pytest.fixture
def hass(loop):
    """Fixture to provide a test instance of HASS.

    The instance is created on the injected event loop and stopped on the
    same loop during fixture teardown.
    """
    hass = loop.run_until_complete(async_test_home_assistant(loop))

    yield hass

    loop.run_until_complete(hass.async_stop())
@pytest.fixture
def requests_mock():
    """Fixture to provide a requests mocker.

    The mocker is active for the duration of the test and uninstalled
    automatically when the with-block exits.
    """
    with _requests_mock.mock() as m:
        yield m
@pytest.fixture
def aioclient_mock():
    """Fixture to mock aioclient calls.

    Yields the mock session provided by the project helper; cleanup is
    handled by the context manager on teardown.
    """
    with mock_aiohttp_client() as mock_session:
        yield mock_session
|
unknown
|
codeparrot/codeparrot-clean
| ||
# Copyright 2010 Matt Chaput. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY MATT CHAPUT ``AS IS'' AND ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
# EVENT SHALL MATT CHAPUT OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
# OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# The views and conclusions contained in the software and documentation are
# those of the authors and should not be interpreted as representing official
# policies, either expressed or implied, of Matt Chaput.
"""
This module contains common utility objects/functions for the other query
parser modules.
"""
import sys
from whoosh.compat import string_type
class QueryParserError(Exception):
    """Raised when the query parser cannot handle its input.

    :param cause: the underlying exception (or message) that triggered
        this error; stored on ``self.cause`` for callers to inspect.
    :param msg: optional human-readable message. Bug fix: this argument
        was previously accepted but silently ignored; when supplied it
        now becomes the exception message instead of ``str(cause)``.
    """

    def __init__(self, cause, msg=None):
        super(QueryParserError, self).__init__(
            str(cause) if msg is None else msg)
        self.cause = cause
def get_single_text(field, text, **kwargs):
    """Return the first token an analyzer produces for ``text``.

    Returns ``None`` when the analysis yields no tokens at all.
    """
    tokens = iter(field.process_text(text, mode="query", **kwargs))
    return next(tokens, None)
def attach(q, stxnode):
    """Copy the source character range from ``stxnode`` onto query ``q``.

    A falsy ``q`` is returned untouched. Raises AttributeError with the
    query's class name when the attributes cannot be set.
    """
    if not q:
        return q
    try:
        q.startchar = stxnode.startchar
        q.endchar = stxnode.endchar
    except AttributeError:
        raise AttributeError("Can't set attribute on %s"
                             % q.__class__.__name__)
    return q
def print_debug(level, msg, out=sys.stderr):
    """Write ``msg`` to ``out`` indented by ``level - 1`` spaces.

    A falsy ``level`` suppresses the output entirely.
    """
    if not level:
        return
    indent = " " * (level - 1)
    out.write("%s%s\n" % (indent, msg))
|
unknown
|
codeparrot/codeparrot-clean
| ||
# -*- coding: utf-8 -*-
# Copyright (C) 2004-2005 Tristan Seligmann and Jonathan Jacobs
# Copyright (C) 2012-2014 Bastian Kleineidam
# Copyright (C) 2015-2016 Tobias Gruetzmacher
from __future__ import absolute_import, division, print_function
from re import compile
from ..scraper import _BasicScraper
from ..util import tagre
class VampireCheerleaders(_BasicScraper):
    """Scraper for the Vampire Cheerleaders webcomic; strips are
    addressed by a name slug under /strips-vc/."""
    url = 'http://www.vampirecheerleaders.net/'
    stripUrl = url + 'strips-vc/%s'
    firstStripUrl = stripUrl % 'fang_service'
    # Comic images are served from /comics/ on the same host.
    imageSearch = compile(tagre("img", "src", r'(/comics/[^"]+)'))
    prevSearch = compile(tagre("a", "href", r'(http://www\.vampirecheerleaders\.net/strips-vc/[^"]+)', before="cndprev"))
    help = 'Index format: name'
class VGCats(_BasicScraper):
    """Scraper for VG Cats; strips are addressed by a numeric strip_id
    query parameter."""
    url = 'http://www.vgcats.com/comics/'
    stripUrl = url + '?strip_id=%s'
    firstStripUrl = stripUrl % '0'
    # Image filenames carry a six-digit prefix before the extension.
    imageSearch = compile(tagre("img", "src", r'(images/\d{6}\.[^"]+)'))
    # The previous-strip link is an anchor wrapping the back.gif arrow.
    prevSearch = compile(tagre("a", "href", r'(\?strip_id=\d+)') +
                         tagre("img", "src", r"back\.gif"))
    help = 'Index format: n (unpadded)'
class VGCatsAdventure(VGCats):
    # Sister series hosted under /ffxi/; reuses the VGCats regexes.
    name = 'VGCats/Adventure'
    url = 'http://www.vgcats.com/ffxi/'
    stripUrl = url + '?strip_id=%s'
class VGCatsSuper(VGCats):
    # Sister series hosted under /super/; reuses the VGCats regexes.
    name = 'VGCats/Super'
    url = 'http://www.vgcats.com/super/'
    stripUrl = url + '?strip_id=%s'
class VictimsOfTheSystem(_BasicScraper):
    """Scraper for Victims of the System; strips are addressed by a
    date-stamped jpg id."""
    url = 'http://www.votscomic.com/'
    stripUrl = url + '?id=%s.jpg'
    firstStripUrl = stripUrl % '070103-002452'
    imageSearch = compile(tagre("img", "src", r'(comicpro/strips/[^"]+)'))
    # The previous link is the anchor whose text is literally "Previous".
    prevSearch = compile(tagre("a", "href", r'(\?id=\d+-\d+\.jpg)') +
                         "Previous")
    help = 'Index format: nnn-nnn'
class ViiviJaWagner(_BasicScraper):
    """Scraper for the Finnish strip Viivi & Wagner on hs.fi."""
    url = 'http://www.hs.fi/viivijawagner/'
    # No per-strip URL scheme is available (see help below).
    stripUrl = None
    imageSearch = compile(tagre("img", "src", r'(http://hs\d+\.snstatic\.fi/webkuva/sarjis/[^"]+)'))
    prevSearch = compile(tagre("a", "href", r'(/viivijawagner/[^"]+)',
                               before="prev-cm"))
    help = 'Index format: none'
    lang = 'fi'

    def namer(self, image_url, page_url):
        # Use the token after '=' in the image URL as the file name.
        return image_url.split('=')[1]
|
unknown
|
codeparrot/codeparrot-clean
| ||
# _*_ coding: utf-8 _*_
'''
lutil: library functions for XBMC video plugins.
Copyright (C) 2013 José Antonio Montes (jamontes)
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
Description:
These funtions are called from the main plugin module, aimed to ease
and simplify the plugin development process.
Release 0.1.7
'''
# First of all We must import all the libraries used for plugin development.
import re, urllib, urllib2
debug_enable = False # The debug logs are disabled by default.
def set_debug_mode(debug_flag):
"""This function sets the debug_enable var to log everything if debug option is true."""
global debug_enable
debug_enable = debug_flag in ("true", True)
def log(message):
"""This function logs the messages into the main XBMC log file. Called from main plugin module."""
if debug_enable:
print "%s" % message
def _log(message):
"""This function logs the messages into the main XBMC log file. Called from the libraries module by other functions."""
if debug_enable:
print "lutils.%s" % message
def get_url_decoded(url):
    """This function returns the URL decoded."""
    _log('get_url_decoded URL: "%s"' % url)
    # unquote_plus also converts '+' back into spaces (Python 2 urllib API).
    return urllib.unquote_plus(url)
def get_url_encoded(url):
    """This function returns the URL encoded."""
    _log('get_url_encoded URL: "%s"' % url)
    # quote_plus encodes spaces as '+' (Python 2 urllib API).
    return urllib.quote_plus(url)
def get_parms_encoded(**kwars):
    """This function returns the params encoded to form an URL or data post.

    Keyword arguments become a 'key=value&key2=value2' query string.
    """
    param_list = urllib.urlencode(kwars)
    _log('get_parms_encoded params: "%s"' % param_list)
    return param_list
def carga_web(url):
    """This function loads the html code from a webserver and returns it into a string.

    NOTE(review): no timeout is set on the request, so a stalled server
    can block the caller indefinitely -- consider adding one.
    """
    _log("carga_web " + url)
    MiReq = urllib2.Request(url) # We use the Request method because we need to add a header into the HTTP GET to the web site.
    # We have to tell the web site we are using a real browser.
    MiReq.add_header('User-Agent', 'Mozilla/5.0 (X11; Linux x86_64; rv:17.0) Gecko/20100101 Firefox/17.0') # This is a true Firefox header.
    MiConex = urllib2.urlopen(MiReq) # We open the HTTP connection to the URL.
    MiHTML = MiConex.read() # We load all the HTML contents from the web page and store it into a var.
    MiConex.close() # We close the HTTP connection as we have all the info required.
    return MiHTML
def carga_web_cookies(url, headers=''):
    """Load the HTML from ``url`` sending extra ``headers`` with the GET.

    Returns a tuple ``(html, cookies)`` where ``cookies`` is a string of
    the distinct Set-Cookie values announced by the server's response
    headers, formatted as " name=value; name2=value2;".
    """
    _log("carga_web_cookies " + url)
    # We use the Request method because we need to add a header into the HTTP GET to the web site.
    MiReq = urllib2.Request(url)
    # We have to tell the web site we are using a real browser.
    MiReq.add_header('User-Agent', 'Mozilla/5.0 (X11; Linux x86_64; rv:17.0) Gecko/20100101 Firefox/17.0') # This is a true Firefox header.
    for key in headers:
        MiReq.add_header(key, headers[key])
    MiConex = urllib2.urlopen(MiReq) # We open the HTTP connection to the URL.
    MiHTML = MiConex.read() # We load all the HTML contents from the web page and store it into a var.
    server_info = "%s" % MiConex.info()
    my_cookie_pattern = re.compile('Set-Cookie: ([^;]+);')
    my_cookies = ''
    pcookie = ''
    # Collect each cookie once, skipping immediate duplicates.
    for lcookie in my_cookie_pattern.findall(server_info):
        if (lcookie != pcookie):
            my_cookies = "%s %s;" % (my_cookies, lcookie)
            pcookie = lcookie
    MiConex.close() # We close the HTTP connection as we have all the info required.
    # Bug fix: the debug label previously said "carga_web", making this
    # function's output indistinguishable from carga_web's in the log.
    _log("carga_web_cookies Cookie:%s" % my_cookies)
    return MiHTML, my_cookies
def send_post_data(url, headers='', data=''):
    """Send an HTTP POST with the given ``headers`` and ``data``.

    Returns a tuple ``(html, cookies)`` where ``cookies`` is a string of
    the distinct Set-Cookie values announced by the server's response
    headers, formatted as " name=value; name2=value2;".
    """
    _log("send_post_data " + url)
    # Passing `data` to Request makes urllib2 issue a POST instead of a GET.
    MiReq = urllib2.Request(url, data)
    # We have to tell the web site we are using a real browser.
    MiReq.add_header('User-Agent', 'Mozilla/5.0 (X11; Linux x86_64; rv:17.0) Gecko/20100101 Firefox/17.0') # This is a true Firefox header.
    for key in headers:
        MiReq.add_header(key, headers[key])
    MiConex = urllib2.urlopen(MiReq) # We open the HTTP connection to the URL.
    MiHTML = MiConex.read() # We load all the HTML contents from the web page and store it into a var.
    server_info = "%s" % MiConex.info()
    my_cookie_pattern = re.compile('Set-Cookie: ([^;]+);')
    my_cookies = ''
    pcookie = ''
    # Collect each cookie once, skipping immediate duplicates.
    for lcookie in my_cookie_pattern.findall(server_info):
        if (lcookie != pcookie):
            my_cookies = "%s %s;" % (my_cookies, lcookie)
            pcookie = lcookie
    MiConex.close() # We close the HTTP connection as we have all the info required.
    # Bug fix: the debug label previously said "carga_web", making this
    # function's output indistinguishable from carga_web's in the log.
    _log("send_post_data Cookie:%s" % my_cookies)
    return MiHTML, my_cookies
def get_redirect(url):
    """This function returns the redirected URL from a 30X response received from the webserver.

    urllib follows the redirect automatically; geturl() then reports the
    final URL actually fetched.
    """
    _log("get_redirect " + url)
    MiConex = urllib.urlopen(url) # Opens the http connection to the URL.
    MiHTML = MiConex.geturl() # Gets the URL redirect link and stores it into MiHTML.
    MiConex.close() # Close the http connection as we get what we need.
    return MiHTML
def find_multiple(text, pattern):
    """Return every match of ``pattern`` in ``text`` (DOTALL mode)."""
    return re.findall(pattern, text, re.DOTALL)
def find_first(text, pattern):
    """Return the first match of ``pattern`` in ``text``, or "" if none.

    Compiled with re.DOTALL so '.' also matches newlines. The original
    used a bare ``except`` to cover the no-match IndexError, which also
    silently swallowed genuine errors such as invalid patterns; only the
    no-match case now yields the empty string.
    """
    matches = re.findall(pattern, text, re.DOTALL)
    return matches[0] if matches else ""
def sanitize_url(url_string):
    """Fixes URL format for certain different URL patterns on latelelibre.fr"""
    # Protocol-relative URLs only need a scheme prepended.
    if url_string.startswith('//'):
        return 'http:' + url_string
    # Site-relative paths get the full host prepended.
    if url_string.startswith('/'):
        return 'http://latelelibre.fr' + url_string
    # Anything else is already absolute; pass it through untouched.
    return url_string
|
unknown
|
codeparrot/codeparrot-clean
| ||
/*
* By downloading, copying, installing or using the software you agree to this license.
* If you do not agree to this license, do not download, install,
* copy or use the software.
*
*
* License Agreement
* For Open Source Computer Vision Library
* (3-clause BSD License)
*
* Copyright (C) 2014, NVIDIA Corporation, all rights reserved.
* Third party copyrights are property of their respective owners.
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
*
* * Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* * Neither the names of the copyright holders nor the names of the contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* This software is provided by the copyright holders and contributors "as is" and
* any express or implied warranties, including, but not limited to, the implied
* warranties of merchantability and fitness for a particular purpose are disclaimed.
* In no event shall copyright holders or contributors be liable for any direct,
* indirect, incidental, special, exemplary, or consequential damages
* (including, but not limited to, procurement of substitute goods or services;
* loss of use, data, or profits; or business interruption) however caused
* and on any theory of liability, whether in contract, strict liability,
* or tort (including negligence or otherwise) arising in any way out of
* the use of this software, even if advised of the possibility of such damage.
*/
#include <algorithm>
#include "common.hpp"
#include "vtransform.hpp"
namespace CAROTENE_NS {
#ifdef CAROTENE_NEON
namespace {
// Element-wise minimum functor consumed by internal::vtransform.
// Provides the three overloads vtransform dispatches on: full-width
// vector, half-width vector, and scalar tail.
template <typename T>
struct Min
{
    typedef T type;

    // 128-bit vector path: lane-wise minimum of two full NEON registers.
    void operator() (const typename internal::VecTraits<T>::vec128 & v_src0,
                     const typename internal::VecTraits<T>::vec128 & v_src1,
                     typename internal::VecTraits<T>::vec128 & v_dst) const
    {
        v_dst = internal::vminq(v_src0, v_src1);
    }

    // 64-bit vector path: lane-wise minimum of two half registers.
    void operator() (const typename internal::VecTraits<T>::vec64 & v_src0,
                     const typename internal::VecTraits<T>::vec64 & v_src1,
                     typename internal::VecTraits<T>::vec64 & v_dst) const
    {
        v_dst = internal::vmin(v_src0, v_src1);
    }

    // Scalar tail: one element at a time for leftovers after vectorization.
    void operator() (const T * src0, const T * src1, T * dst) const
    {
        dst[0] = std::min(src0[0], src1[0]);
    }
};
// Element-wise maximum functor consumed by internal::vtransform.
// Mirrors Min above: full-width vector, half-width vector, scalar tail.
template <typename T>
struct Max
{
    typedef T type;

    // 128-bit vector path: lane-wise maximum of two full NEON registers.
    void operator() (const typename internal::VecTraits<T>::vec128 & v_src0,
                     const typename internal::VecTraits<T>::vec128 & v_src1,
                     typename internal::VecTraits<T>::vec128 & v_dst) const
    {
        v_dst = internal::vmaxq(v_src0, v_src1);
    }

    // 64-bit vector path: lane-wise maximum of two half registers.
    void operator() (const typename internal::VecTraits<T>::vec64 & v_src0,
                     const typename internal::VecTraits<T>::vec64 & v_src1,
                     typename internal::VecTraits<T>::vec64 & v_dst) const
    {
        v_dst = internal::vmax(v_src0, v_src1);
    }

    // Scalar tail: one element at a time for leftovers after vectorization.
    void operator() (const T * src0, const T * src1, T * dst) const
    {
        dst[0] = std::max(src0[0], src1[0]);
    }
};
} // namespace
// NEON build: each generated entry point forwards to the generic binary
// vtransform loop with the Min/Max functor instantiated for the element type.
#define IMPL_OP(fun, op, type)                                  \
    void fun(const Size2D &size,                                \
             const type * src0Base, ptrdiff_t src0Stride,       \
             const type * src1Base, ptrdiff_t src1Stride,       \
             type * dstBase, ptrdiff_t dstStride)               \
    {                                                           \
        internal::assertSupportedConfiguration();               \
        internal::vtransform(size,                              \
                             src0Base, src0Stride,              \
                             src1Base, src1Stride,              \
                             dstBase, dstStride, op<type>());   \
    }
#else
// Non-NEON build: the same entry points exist for ABI compatibility but only
// assert that this configuration is unsupported.
#define IMPL_OP(fun, op, type)                                  \
    void fun(const Size2D &,                                    \
             const type *, ptrdiff_t,                           \
             const type *, ptrdiff_t,                           \
             type *, ptrdiff_t)                                 \
    {                                                           \
        internal::assertSupportedConfiguration();               \
    }
#endif

// Emit both the min() and max() entry points for one element type.
#define IMPL_MINMAX(type) IMPL_OP(min, Min, type) IMPL_OP(max, Max, type)

IMPL_MINMAX(u8)
IMPL_MINMAX(s8)
IMPL_MINMAX(u16)
IMPL_MINMAX(s16)
IMPL_MINMAX(u32)
IMPL_MINMAX(s32)
IMPL_MINMAX(f32)
} // namespace CAROTENE_NS
|
cpp
|
github
|
https://github.com/opencv/opencv
|
hal/carotene/src/min_max.cpp
|
#! /usr/bin/env python
"""Test program for the fcntl C module.
OS/2+EMX doesn't support the file locking operations.
Roger E. Masse
"""
import struct
import fcntl
import os, sys
from test.test_support import verbose, TESTFN
filename = TESTFN

# Choose the struct format characters for the l_start/l_len members of
# struct flock: 64-bit offsets ("qq") when the platform exposes
# os.O_LARGEFILE, otherwise native long ("ll").
try:
    os.O_LARGEFILE
except AttributeError:
    start_len = "ll"
else:
    start_len = "qq"
# AtheOS always uses 64-bit offsets regardless of O_LARGEFILE.
if sys.platform.startswith('atheos'):
    start_len = "qq"

# Build the platform-specific F_SETLKW lock record: the field order and
# widths of struct flock differ across Unix variants; os2emx has no record
# locking at all, so lockdata stays None there.
if sys.platform in ('netbsd1', 'Darwin1.2', 'darwin',
                    'freebsd2', 'freebsd3', 'freebsd4', 'freebsd5', 'freebsd6',
                    'bsdos2', 'bsdos3', 'bsdos4',
                    'openbsd', 'openbsd2', 'openbsd3'):
    if struct.calcsize('l') == 8:
        off_t = 'l'
        pid_t = 'i'
    else:
        # 32-bit long: pad off_t to 64 bits with four pad bytes.
        off_t = 'lxxxx'
        pid_t = 'l'
    lockdata = struct.pack(off_t+off_t+pid_t+'hh', 0, 0, 0, fcntl.F_WRLCK, 0)
elif sys.platform in ['aix3', 'aix4', 'hp-uxB', 'unixware7']:
    lockdata = struct.pack('hhlllii', fcntl.F_WRLCK, 0, 0, 0, 0, 0, 0)
elif sys.platform in ['os2emx']:
    lockdata = None
else:
    lockdata = struct.pack('hh'+start_len+'hh', fcntl.F_WRLCK, 0, 0, 0, 0, 0)
if lockdata:
    if verbose:
        print 'struct.pack: ', repr(lockdata)

# the example from the library docs: set O_NONBLOCK and take a write lock,
# passing a raw integer file descriptor.
f = open(filename, 'w')
rv = fcntl.fcntl(f.fileno(), fcntl.F_SETFL, os.O_NONBLOCK)
if verbose:
    print 'Status from fcntl with O_NONBLOCK: ', rv
if sys.platform not in ['os2emx']:
    rv = fcntl.fcntl(f.fileno(), fcntl.F_SETLKW, lockdata)
    if verbose:
        print 'String from fcntl with F_SETLKW: ', repr(rv)
f.close()
os.unlink(filename)

# Again, but pass the file rather than numeric descriptor:
f = open(filename, 'w')
rv = fcntl.fcntl(f, fcntl.F_SETFL, os.O_NONBLOCK)
if sys.platform not in ['os2emx']:
    rv = fcntl.fcntl(f, fcntl.F_SETLKW, lockdata)
f.close()
os.unlink(filename)
|
unknown
|
codeparrot/codeparrot-clean
| ||
# frozen_string_literal: true
module Jekyll
  # Generator is a bare subclass of Plugin; concrete site generators
  # inherit from it and implement #generate.
  class Generator < Plugin
  end
end
|
ruby
|
github
|
https://github.com/jekyll/jekyll
|
lib/jekyll/generator.rb
|
import unittest
from django.db import NotSupportedError, connection
from django.db.models import CharField, F, Value
from django.db.models.functions import Cast, JSONArray, JSONObject, Lower
from django.test import TestCase
from django.test.testcases import skipIfDBFeature, skipUnlessDBFeature
from django.utils import timezone
from ..models import Article, Author
@skipUnlessDBFeature("supports_json_field")
class JSONArrayTests(TestCase):
    """Tests for the JSONArray database function on JSON-capable backends."""

    @classmethod
    def setUpTestData(cls):
        # Single shared fixture. NOTE(review): several tests below expect
        # Author.age == 30 and Author.goes_by is None -- presumably model
        # defaults; confirm against ..models.
        Author.objects.create(name="Ivan Ivanov", alias="iivanov")

    def test_empty(self):
        # JSONArray() with no arguments produces an empty JSON array.
        obj = Author.objects.annotate(json_array=JSONArray()).first()
        self.assertEqual(obj.json_array, [])

    def test_basic(self):
        # Value("name") is a literal string; F("name") resolves the column.
        obj = Author.objects.annotate(
            json_array=JSONArray(Value("name"), F("name"))
        ).first()
        self.assertEqual(obj.json_array, ["name", "Ivan Ivanov"])

    def test_expressions(self):
        # Mixed element kinds: function call, columns, literal float,
        # arithmetic expression.
        obj = Author.objects.annotate(
            json_array=JSONArray(
                Lower("name"),
                F("alias"),
                F("goes_by"),
                Value(30000.15),
                F("age") * 2,
            )
        ).first()
        self.assertEqual(
            obj.json_array,
            [
                "ivan ivanov",
                "iivanov",
                None,
                30000.15,
                60,
            ],
        )

    def test_nested_json_array(self):
        # A JSONArray may appear as an element of another JSONArray.
        obj = Author.objects.annotate(
            json_array=JSONArray(
                F("name"),
                JSONArray(F("alias"), F("age")),
            )
        ).first()
        self.assertEqual(
            obj.json_array,
            [
                "Ivan Ivanov",
                ["iivanov", 30],
            ],
        )

    def test_nested_empty_json_array(self):
        # An empty JSONArray nests correctly as an element.
        obj = Author.objects.annotate(
            json_array=JSONArray(
                F("name"),
                JSONArray(),
            )
        ).first()
        self.assertEqual(
            obj.json_array,
            [
                "Ivan Ivanov",
                [],
            ],
        )

    def test_textfield(self):
        # Long text (4000 chars) round-trips through the JSON array intact.
        Article.objects.create(
            title="The Title",
            text="x" * 4000,
            written=timezone.now(),
        )
        obj = Article.objects.annotate(json_array=JSONArray(F("text"))).first()
        self.assertEqual(obj.json_array, ["x" * 4000])

    @unittest.skipUnless(connection.vendor == "postgresql", "PostgreSQL specific tests")
    def test_explicit_cast(self):
        # An explicit Cast must emit exactly one ::varchar, not a
        # redundant double cast.
        qs = Author.objects.annotate(
            json_array=JSONArray(Cast("age", CharField()))
        ).values("json_array")
        with self.assertNumQueries(1) as ctx:
            self.assertSequenceEqual(qs, [{"json_array": ["30"]}])
        sql = ctx.captured_queries[0]["sql"]
        self.assertIn("::varchar", sql)
        self.assertNotIn("::varchar)::varchar", sql)

    def test_order_by_key(self):
        # Ordering by an index into the annotated array ("arr__0") matches
        # ordering by the underlying column.
        qs = Author.objects.annotate(arr=JSONArray(F("alias"))).order_by("arr__0")
        self.assertQuerySetEqual(qs, Author.objects.order_by("alias"))

    def test_order_by_nested_key(self):
        # Nested index lookup ("arr__0__0") works for descending order too.
        qs = Author.objects.annotate(arr=JSONArray(JSONArray(F("alias")))).order_by(
            "-arr__0__0"
        )
        self.assertQuerySetEqual(qs, Author.objects.order_by("-alias"))
@skipIfDBFeature("supports_json_field")
class JSONArrayNotSupportedTests(TestCase):
    """Backends without JSONField support must refuse JSONArray outright."""

    def test_not_supported(self):
        expected_message = "JSONFields are not supported on this database backend."
        # Building the queryset is lazy; the error surfaces on evaluation.
        queryset = Author.objects.annotate(json_array=JSONArray())
        with self.assertRaisesMessage(NotSupportedError, expected_message):
            queryset.first()
@skipUnlessDBFeature("has_json_object_function", "supports_json_field")
class JSONArrayObjectTests(TestCase):
    """Interoperability tests for nesting JSONArray and JSONObject."""

    @classmethod
    def setUpTestData(cls):
        # Single shared fixture. NOTE(review): tests below expect
        # Author.age == 30 -- presumably a model default; confirm in ..models.
        Author.objects.create(name="Ivan Ivanov", alias="iivanov")

    def test_nested_json_array_object(self):
        # JSONObject elements (each containing another JSONObject) inside a
        # JSONArray.
        obj = Author.objects.annotate(
            json_array=JSONArray(
                JSONObject(
                    name1="name",
                    nested_json_object1=JSONObject(alias1="alias", age1="age"),
                ),
                JSONObject(
                    name2="name",
                    nested_json_object2=JSONObject(alias2="alias", age2="age"),
                ),
            )
        ).first()
        self.assertEqual(
            obj.json_array,
            [
                {
                    "name1": "Ivan Ivanov",
                    "nested_json_object1": {"alias1": "iivanov", "age1": 30},
                },
                {
                    "name2": "Ivan Ivanov",
                    "nested_json_object2": {"alias2": "iivanov", "age2": 30},
                },
            ],
        )

    def test_nested_json_object_array(self):
        # The inverse nesting: a JSONArray of JSONObjects as a JSONObject value.
        obj = Author.objects.annotate(
            json_object=JSONObject(
                name="name",
                nested_json_array=JSONArray(
                    JSONObject(alias1="alias", age1="age"),
                    JSONObject(alias2="alias", age2="age"),
                ),
            )
        ).first()
        self.assertEqual(
            obj.json_object,
            {
                "name": "Ivan Ivanov",
                "nested_json_array": [
                    {"alias1": "iivanov", "age1": 30},
                    {"alias2": "iivanov", "age2": 30},
                ],
            },
        )

    def test_order_by_nested_key(self):
        # Ordering by an object key inside an array element
        # ("arr__0__alias") matches ordering by the underlying column.
        qs = Author.objects.annotate(
            arr=JSONArray(JSONObject(alias=F("alias")))
        ).order_by("-arr__0__alias")
        self.assertQuerySetEqual(qs, Author.objects.order_by("-alias"))
|
python
|
github
|
https://github.com/django/django
|
tests/db_functions/json/test_json_array.py
|
// run
// Copyright 2025 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package main
// Issue #45624 is the proposal to accept new(expr) in go1.26.
// Here we test its run-time behavior.
func main() {
	// Each sub-block exercises one accepted form of new(expr).
	{
		p := new(123) // untyped constant expr
		if *p != 123 {
			panic("wrong value")
		}
	}
	{
		x := 42
		p := new(x) // non-constant expr
		if *p != x {
			panic("wrong value")
		}
	}
	{
		x := [2]int{123, 456}
		p := new(x) // composite value
		if *p != x {
			panic("wrong value")
		}
	}
	{
		// An untyped boolean expression: the pointee must default to bool.
		var i int
		v := new(i > 0) // untyped expression, see issue #75617
		if *v != false {
			panic("wrong value")
		}
	}
}
// Regression test for ICE in staticdata.GlobalLinksym from
// use of autotemp outside a function (go.dev/issue/77237).
// x forces new(expr) evaluation at package scope; y aliases the result.
var (
	x = new(0)
	y = x
)
|
go
|
github
|
https://github.com/golang/go
|
test/newexpr.go
|
#![allow(rustc::symbol_intern_string_literal)]
use rustc_hashes::Hash64;
use rustc_span::def_id::{DefPathHash, StableCrateId};
use rustc_span::edition::Edition;
use rustc_span::{Symbol, create_session_globals_then};
use crate::definitions::{DefKey, DefPathData, DisambiguatedDefPathData};
#[test]
fn def_path_hash_depends_on_crate_id() {
    // This test makes sure that *both* halves of a DefPathHash depend on
    // the crate-id of the defining crate. This is a desirable property
    // because the crate-id can be more easily changed than the DefPath
    // of an item, so, in the case of a crate-local DefPathHash collision,
    // the user can simply "roll the dice again" for all DefPathHashes in
    // the crate by changing the crate disambiguator (e.g. via bumping the
    // crate's version number).
    create_session_globals_then(Edition::Edition2024, &[], None, || {
        // Two crate ids that differ only in their metadata ("1" vs "2").
        let id0 = StableCrateId::new(Symbol::intern("foo"), false, vec!["1".to_string()], "");
        let id1 = StableCrateId::new(Symbol::intern("foo"), false, vec!["2".to_string()], "");

        let h0 = mk_test_hash(id0);
        let h1 = mk_test_hash(id1);

        // Both the crate-id half and the crate-local half must differ.
        assert_ne!(h0.stable_crate_id(), h1.stable_crate_id());
        assert_ne!(h0.local_hash(), h1.local_hash());

        // Builds the DefPathHash of the crate-root DefKey for the given
        // crate id, seeding the parent hash from the crate id itself.
        fn mk_test_hash(stable_crate_id: StableCrateId) -> DefPathHash {
            let parent_hash =
                DefPathHash::new(stable_crate_id, Hash64::new(stable_crate_id.as_u64()));
            let key = DefKey {
                parent: None,
                disambiguated_data: DisambiguatedDefPathData {
                    data: DefPathData::CrateRoot,
                    disambiguator: 0,
                },
            };
            key.compute_stable_hash(parent_hash)
        }
    })
}
|
rust
|
github
|
https://github.com/rust-lang/rust
|
compiler/rustc_hir/src/tests.rs
|
from twisted.internet.task import react
from twisted.internet.defer import inlineCallbacks as coroutine
from autobahn.twisted.connection import Connection
def main(reactor, connection):
    """'start' handler for the Connection.

    Attaches an on_join callback that registers com.example.add2, invokes
    it once, prints the result, and leaves the session.
    """
    @coroutine
    def on_join(session, details):
        print("on_join: {}".format(details))
        # Best-effort debug dump of transport internals;
        # websocket_protocol_in_use only exists on websocket transports,
        # so any failure here is deliberately ignored.
        try:
            print(session._transport)
            print(session._transport.websocket_protocol_in_use)
        except Exception as e:
            pass

        def add2(a, b):
            print("add2() called", a, b)
            return a + b

        # Register the procedure, call it once, then leave regardless
        # of whether the call succeeded.
        yield session.register(add2, u'com.example.add2')
        try:
            res = yield session.call(u'com.example.add2', 2, 3)
            print("result: {}".format(res))
        except Exception as e:
            print("error: {}".format(e))
        finally:
            print("leaving ..")
            session.leave()
    connection.on('join', on_join)
if __name__ == '__main__':
    #import txaio
    #txaio.use_twisted()
    #txaio.start_logging(level='debug')
    # Two candidate transports: a Unix-socket rawsocket/msgpack endpoint and
    # a TCP websocket endpoint. NOTE(review): list order presumably defines
    # connection preference -- confirm against autobahn Connection docs.
    transports = [
        {
            'type': 'rawsocket',
            'serializer': 'msgpack',
            'endpoint': {
                'type': 'unix',
                'path': '/tmp/cb1.sock'
            }
        },
        {
            'type': 'websocket',
            'url': 'ws://127.0.0.1:8080/ws',
            'endpoint': {
                'type': 'tcp',
                'host': '127.0.0.1',
                'port': 8080
            }
        }
    ]
    connection = Connection(transports=transports)
    connection.on('start', main)
    react(connection.start)
|
unknown
|
codeparrot/codeparrot-clean
| ||
#!/usr/bin/env python
#
# Copyright 2012-2014 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# SPDX-License-Identifier: GPL-3.0-or-later
#
#
from gnuradio import gr, gr_unittest, fft, filter, blocks
import math
def sig_source_c(samp_rate, freq, amp, N):
    """Generate N samples of a complex sinusoid (cos + 1j*sin).

    Args:
        samp_rate: sampling rate in samples/second.
        freq: tone frequency in Hz.
        amp: amplitude scale applied to every sample.
        N: number of samples to generate.

    Returns:
        list of complex samples amp * exp(1j * 2*pi*freq*t) for t = x/samp_rate.
    """
    # BUG FIX: `amp` was accepted but silently ignored; it now scales the
    # output. All in-file callers pass amp=1, so their results are unchanged.
    t = [float(x) / samp_rate for x in range(N)]
    return [amp * complex(math.cos(2. * math.pi * freq * x),
                          math.sin(2. * math.pi * freq * x)) for x in t]
def run_test(tb, channel, fft_rotate, fft_filter):
    """Build and run one pfb_decimator_ccf flowgraph; return actual vs ideal.

    Sums five complex tones (one per channel) into a wideband signal,
    decimates by M=5 selecting `channel`, and returns a tuple
    (dst_data, expected_data) where expected_data is the ideal baseband
    sinusoid with a per-channel phase correction applied.

    Args:
        tb: gr.top_block to build the flowgraph into.
        channel: index of the channel to extract (0..4).
        fft_rotate, fft_filter: implementation flags of pfb_decimator_ccf.
    """
    N = 1000  # number of samples to use
    M = 5  # Number of channels
    fs = 5000.0  # baseband sampling rate
    ifs = M * fs  # input samp rate to decimator
    taps = filter.firdes.low_pass_2(1, ifs, fs / 2, fs / 10,
                                    attenuation_dB=80,
                                    window=fft.window.WIN_BLACKMAN_hARRIS)
    signals = list()
    add = blocks.add_cc()
    freqs = [-230., 121., 110., -513., 203.]
    # Index of the tone that lands in the requested output channel
    # (accounts for the decimator's channel ordering).
    Mch = ((len(freqs) - 1) // 2 + channel) % len(freqs)
    for i in range(len(freqs)):
        # Shift each tone into its own fs-wide channel of the input band.
        f = freqs[i] + (M // 2 - M + i + 1) * fs
        data = sig_source_c(ifs, f, 1, N)
        signals.append(blocks.vector_source_c(data))
        tb.connect(signals[i], (add, i))
    s2ss = blocks.stream_to_streams(gr.sizeof_gr_complex, M)
    pfb = filter.pfb_decimator_ccf(M, taps, channel, fft_rotate, fft_filter)
    snk = blocks.vector_sink_c()
    tb.connect(add, s2ss)
    for i in range(M):
        tb.connect((s2ss, i), (pfb, i))
    tb.connect(pfb, snk)
    tb.run()
    L = len(snk.data())
    # Adjusted phase rotations for data
    # NOTE(review): these per-channel constants look empirically measured;
    # confirm against the filter's phase response before changing them.
    phase = [0.11058476216852586,
             4.5108246571401693,
             3.9739891674564594,
             2.2820531095511924,
             1.3782797467397869]
    phase = phase[channel]
    # Filter delay is the normal delay of each arm
    tpf = math.ceil(len(taps) / float(M))
    delay = -(tpf - 1.0) / 2.0
    delay = int(delay)
    # Create a time scale that's delayed to match the filter delay
    t = [float(x) / fs for x in range(delay, L + delay)]
    # Create known data as complex sinusoids for the baseband freq
    # of the extracted channel is due to decimator output order.
    expected_data = [
        math.cos(
            2. *
            math.pi *
            freqs[Mch] *
            x +
            phase) +
        1j *
        math.sin(
            2. *
            math.pi *
            freqs[Mch] *
            x +
            phase) for x in t]
    dst_data = snk.data()
    return (dst_data, expected_data)
class test_pfb_decimator(gr_unittest.TestCase):
    """QA for filter.pfb_decimator_ccf.

    Each test_00X extracts channel X with every combination of the
    (fft_rotate, fft_filter) implementation flags and compares the tail of
    the output to the ideal sinusoid computed by run_test().
    """

    def setUp(self):
        self.tb = gr.top_block()

    def tearDown(self):
        self.tb = None

    def _assert_channel(self, channel, Ntest=50):
        """Run all four flag combinations for one channel and compare the
        last Ntest samples (skips the filter's transient region)."""
        # Combination order matches the original tests:
        # (F,F), (F,T), (T,F), (T,T).
        for fft_rotate in (False, True):
            for fft_filter in (False, True):
                dst_data, expected_data = run_test(
                    self.tb, channel, fft_rotate, fft_filter)
                self.assertComplexTuplesAlmostEqual(
                    expected_data[-Ntest:], dst_data[-Ntest:], 4)

    def test_000(self):
        self._assert_channel(0)

    def test_001(self):
        self._assert_channel(1)

    def test_002(self):
        self._assert_channel(2)

    def test_003(self):
        self._assert_channel(3)

    def test_004(self):
        self._assert_channel(4)
if __name__ == '__main__':
    # Run the QA suite through GNU Radio's unittest wrapper.
    gr_unittest.run(test_pfb_decimator)
|
unknown
|
codeparrot/codeparrot-clean
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.